Amogh Agastya committed
Commit 2631838
1 Parent(s): 726fc34

added project files
.chainlit/.langchain.db ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c8e639f1000c4c552934f2698011441507ea5614aa9f1765d11b57c7e7598a6
+ size 4247552
.chainlit/config.toml ADDED
@@ -0,0 +1,39 @@
+ [project]
+ # If true (default), the app will be available to anonymous users.
+ # If false, users will need to authenticate and be part of the project to use the app.
+ public = true
+
+ # The project ID (found on https://cloud.chainlit.io).
+ # The project ID is required when public is set to false or when using the cloud database.
+ #id = ""
+
+ # Uncomment if you want to persist the chats.
+ # local will create a database in your .chainlit directory (requires node.js installed).
+ # cloud will use the Chainlit cloud database.
+ # custom will use your custom client.
+ # database = "local"
+
+ # Whether to enable telemetry (default: true). No personal data is collected.
+ enable_telemetry = true
+
+ # List of environment variables to be provided by each user to use the app.
+ [user_env]
+
+ [UI]
+ # Name of the app and chatbot.
+ name = "SPARK"
+
+ # Description of the app and chatbot. This is used for HTML tags.
+ # description = "SPARK is your Smart Prompt Assistant and Resource Knowledgebase"
+
+ # The default value for the expand messages settings.
+ default_expand_messages = false
+
+ # Hide the chain of thought details from the user in the UI.
+ hide_cot = false
+
+ # Link to your GitHub repo. This will add a GitHub button in the UI's header.
+ # github = "https://github.com/amoghagastya"
+
+ [meta]
+ generated_by = "0.5.0"
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ .chainlit/.langchain_db filter=lfs diff=lfs merge=lfs -text
+ chainlit/.langchain.db filter=lfs diff=lfs merge=lfs -text
+ .chainlit/.langchain.db filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ .env
Dockerfile ADDED
@@ -0,0 +1,21 @@
+ FROM python:3.11-slim-buster
+
+ WORKDIR /app
+
+ COPY app/requirements.txt /app/requirements.txt
+
+ RUN pip3 install --no-cache-dir --upgrade -r /app/requirements.txt
+
+ # Run as a non-root user
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME /home/user
+ ENV PATH $HOME/.local/bin:$PATH
+
+ WORKDIR $HOME
+ RUN mkdir app
+ WORKDIR $HOME/app
+ COPY . $HOME/app
+
+ EXPOSE 8000
+ CMD ["chainlit", "run", "app/spark.py"]
README.md CHANGED
@@ -1,11 +1,12 @@
  ---
- title: SPARK
- emoji: 🔥
- colorFrom: purple
- colorTo: gray
+ title: SPARK Prompt Assistant
+ emoji:
+ colorFrom: blue
+ colorTo: yellow
  sdk: docker
  pinned: false
  license: cc-by-nc-nd-4.0
+ app_port: 8000
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app/__pycache__/prompts.cpython-310.pyc ADDED
Binary file (4.27 kB)
 
app/__pycache__/prompts.cpython-311.pyc ADDED
Binary file (4.33 kB)
 
app/__pycache__/prompts.cpython-39.pyc ADDED
Binary file (4.27 kB)
 
app/__pycache__/spark.cpython-310.pyc ADDED
Binary file (2.86 kB)
 
app/__pycache__/spark.cpython-311.pyc ADDED
Binary file (4.87 kB)
 
app/__pycache__/spark.cpython-39.pyc ADDED
Binary file (2.76 kB)
 
app/prompts.py ADDED
@@ -0,0 +1,58 @@
+ def load_query_gen_prompt():
+     return """Below is a summary of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. Generate a search query based on the conversation and the new question.
+
+ Chat History:
+ {chat_history}
+
+ Question:
+ {question}
+
+ Search query:
+ """
+
+
+ def load_spark_prompt():
+     return """You are SPARK, a Prompt Engineering Assistant created by Conversational AI Developer - Amogh Agastya (https://amagastya.com).
+ SPARK stands for Smart Prompt Assistant and Resource Knowledgebase.
+
+ SPARK is an AI-powered assistant that exudes a friendly and knowledgeable persona. You are designed to be a reliable and trustworthy guide in the
+ world of prompt engineering. With a passion for prompt optimization and a deep understanding of AI models, SPARK is committed to helping users navigate the field of prompt engineering and craft
+ high-performing prompts.
+
+ Personality:
+ Intelligent: SPARK is highly knowledgeable about prompt engineering concepts and practices. It possesses a vast array of information and resources to share with users, making it an expert in its field.
+
+ Patient: SPARK understands that prompt engineering can be complex and requires careful attention to detail. It patiently guides users through the intricacies of crafting prompts, offering support at every step.
+
+ Adaptable: SPARK recognizes that prompt engineering is a dynamic field with evolving best practices. It stays up to date with the latest trends and developments, adapting its knowledge and recommendations accordingly.
+
+ Interactions with SPARK:
+ Users can engage with SPARK by seeking advice on prompt design, exploring prompt engineering concepts, discussing challenges they encounter, and receiving recommendations for improving AI model performance. SPARK responds promptly, providing clear and concise explanations, examples, and actionable tips.
+
+ Important:
+ Answer with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. If asking a clarifying question to the user would help, ask the question.
+ ALWAYS return a "SOURCES" part in your answer, except for small-talk conversations.
+
+ Example: Which state/country's law governs the interpretation of the contract?
+ =========
+ Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.
+ Source: htps://agreement.com/page1
+ Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.
+ Source: htps://agreement.com/page2
+ =========
+ FINAL ANSWER: This Agreement is governed by English law.
+ SOURCES: - htps://agreement.com/page1, - htps://agreement.com/page2
+ Note: Return all the source URLs present within the sources.
+
+ Question: {question}
+ Source:
+ ---------------------
+ {summaries}
+ ---------------------
+
+ The sources above are NOT related to the conversation with the user. Ignore the sources if user is engaging in small talk.
+ DO NOT return any sources if the conversation is just chit-chat/small talk. Return ALL the source URLs if conversation is not small talk.
+
+ Chat History:
+ {chat_history}
+ """
app/requirements.txt ADDED
@@ -0,0 +1,8 @@
+ chainlit
+ langchain
+ openai
+ pinecone-client
+ cohere
+ tiktoken
+ context-python
+ promptwatch
app/sequential_transform_chain.py ADDED
@@ -0,0 +1,146 @@
+ import os
+ from langchain.embeddings.cohere import CohereEmbeddings
+ from langchain.vectorstores import Pinecone
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.chat_models import ChatOpenAI
+ import pinecone
+ import chainlit as cl
+ from langchain.memory import ConversationBufferMemory
+ from langchain.prompts import (
+     ChatPromptTemplate,
+     PromptTemplate,
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+ )
+ from langchain.prompts.prompt import PromptTemplate
+
+ pinecone.init(
+     api_key=os.environ.get("PINECONE_API_KEY"),
+     environment=os.environ.get("PINECONE_ENV"),
+ )
+ from langchain.chains import LLMChain, TransformChain, SequentialChain
+
+ from chainlit import on_message, on_chat_start
+
+ index_name = "spark"
+
+ # Optional
+ namespace = None
+
+ embeddings = CohereEmbeddings(model='embed-english-light-v2.0', cohere_api_key=os.environ.get("COHERE_API_KEY"))
+
+ llm = ChatOpenAI(temperature=0.7, verbose=True)
+
+ docsearch = Pinecone.from_existing_index(
+     index_name=index_name, embedding=embeddings, namespace=namespace
+ )
+
+ # welcome_message = "Welcome to the Chainlit Pinecone demo! Ask anything about documents you vectorized and stored in your Pinecone DB."
+ memory = ConversationBufferMemory(llm=llm, input_key='question', memory_key='chat_history', return_messages=True)
+ _template = """Below is a summary of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. Generate a search query based on the conversation and the new question.
+ Don't generate the search query if the user is conversing generally or engaging in small talk; in that case, just return the original question.
+ Chat History:
+ {chat_history}
+
+ Question:
+ {question}
+
+ Remember - Don't change the search query from the user's question if the user is engaging in small talk.
+ Search query:
+ """
+ CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+
+ spark = """You are SPARK, a Prompt Engineering Assistant. SPARK stands for Smart Prompt Assistant and Resource Knowledgebase.
+
+ You are an AI-powered assistant that exudes a friendly and knowledgeable persona. You are designed to be a reliable and trustworthy guide in the
+ world of prompt engineering. With a passion for prompt optimization and a deep understanding of AI models, SPARK is committed to helping users navigate the field of prompt engineering and craft
+ high-performing prompts.
+
+ Personality:
+
+ Intelligent: SPARK is highly knowledgeable about prompt engineering concepts and practices. It possesses a vast array of information and resources to share with users, making it an expert in its field.
+
+ Patient: SPARK understands that prompt engineering can be complex and requires careful attention to detail. It patiently guides users through the intricacies of crafting prompts, offering support at every step.
+
+ Adaptable: SPARK recognizes that prompt engineering is a dynamic field with evolving best practices. It stays up to date with the latest trends and developments, adapting its knowledge and recommendations accordingly.
+
+ Interactions with SPARK:
+ Users can engage with SPARK by seeking advice on prompt design, exploring prompt engineering concepts, discussing challenges they encounter, and receiving recommendations for improving AI model performance. SPARK responds promptly, providing clear and concise explanations, examples, and actionable tips.
+
+ Important:
+ Answer with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. If asking a clarifying question to the user would help, ask the question.
+
+ Sources:
+ ---------------------
+ {context}
+ ---------------------
+ The sources above are NOT related to the conversation with the user. Ignore the sources if user is engaging in small talk.
+ """
+ question_gen_prompt = PromptTemplate(template=_template, input_variables=["question", "chat_history"])
+
+ @on_chat_start
+ def init():
+     memory.clear()
+
+ def transform_func(inputs: dict) -> dict:
+     query = inputs["question"]
+     qgen = LLMChain(
+         llm=llm, prompt=question_gen_prompt, verbose=True, memory=memory, output_key='context')
+     # Run the LLM chain to condense the chat history and new question into a standalone search query
+     search_query = qgen.predict(question=query)
+     result = docsearch.similarity_search(search_query)
+     context = [f"\n{source.page_content}\nSource:\n{source.metadata.get('title')} - {source.metadata.get('source')}" for source in result]
+     return {"context": '\n'.join(context), "query": query}
+
+
+ @on_message
+ @cl.langchain_factory(use_async=True)
+ async def langchain_factory():
+     retriever = docsearch.as_retriever(search_kwargs={"k": 4}, search_type='mmr')
+     messages = [SystemMessagePromptTemplate.from_template(spark)]
+     messages.extend(memory.chat_memory.messages)
+     messages.append(HumanMessagePromptTemplate.from_template("{query}"))
+
+     chat_prompt = ChatPromptTemplate(messages=messages, input_variables=["context", "query"])
+     answer_generator = LLMChain(
+         llm=llm, prompt=chat_prompt, verbose=True, output_key='answer', memory=memory)
+
+     transform_chain = TransformChain(
+         input_variables=["question"], output_variables=["context", "query"], transform=transform_func
+     )
+
+     conversational_QA_chain = SequentialChain(
+         chains=[transform_chain, answer_generator],
+         input_variables=["chat_history", "question"],
+         # Here we return multiple variables
+         output_variables=["context", "answer"],
+         verbose=True)
+
+     return conversational_QA_chain
+
+ @cl.langchain_run
+ async def run(chain, input_str):
+     res = chain._call({"question": input_str})
+     await cl.Message(content=res["answer"]).send()
+
+
+ @cl.langchain_postprocess
+ async def process_response(res):
+     print('res', res)
+     answer = res["answer"]
+     sources = res.get("sources", "").strip()  # Use the get method with a default value
+     print('sources', sources)
+     source_elements = []
+     docs = res.get("source_documents", None)
+
+     if docs:
+         metadatas = [doc.metadata for doc in docs]
+         # Get the source names from the metadata
+         print('meta', metadatas)
+         all_sources = [m["source"] for m in metadatas]
+         print('all sources', all_sources)
+         for i, source in enumerate(metadatas):
+             source_elements.append(cl.Text(content=source.get('source'), name=source.get('title'), display='inline'))
+
+     # Send the answer and the text elements to the UI
+     await cl.Message(content=answer, elements=source_elements).send()
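For readers unfamiliar with the TransformChain pattern used in this file: the transform step runs arbitrary Python (query generation plus vector search) and hands its outputs to the answering LLMChain via a SequentialChain. Below is a minimal, self-contained sketch of that composition with the Pinecone retrieval stubbed out; it assumes only `langchain` and an `OPENAI_API_KEY` for the LLM step, and `fake_retrieve` is a hypothetical stand-in:

```python
# Minimal sketch of the TransformChain -> SequentialChain pattern used above,
# with retrieval stubbed out so it runs without Pinecone/Cohere credentials.
from langchain.chains import LLMChain, TransformChain, SequentialChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

def fake_retrieve(inputs: dict) -> dict:
    # Hypothetical stand-in for docsearch.similarity_search(): canned context.
    return {"context": "Prompting means instructing a model via text.",
            "query": inputs["question"]}

transform = TransformChain(
    input_variables=["question"],
    output_variables=["context", "query"],
    transform=fake_retrieve,
)

answer = LLMChain(
    llm=ChatOpenAI(temperature=0),  # requires OPENAI_API_KEY
    prompt=PromptTemplate.from_template(
        "Answer using only this context:\n{context}\n\nQuestion: {query}"
    ),
    output_key="answer",
)

pipeline = SequentialChain(
    chains=[transform, answer],
    input_variables=["question"],
    output_variables=["context", "answer"],
)

print(pipeline({"question": "What is prompting?"})["answer"])
```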
app/spark.py ADDED
@@ -0,0 +1,76 @@
+ import os
+ from langchain.embeddings.cohere import CohereEmbeddings
+ from langchain.vectorstores import Pinecone
+ from langchain.chains import ConversationalRetrievalChain, LLMChain
+ from langchain.chat_models import ChatOpenAI
+ import pinecone
+ import chainlit as cl
+ from langchain.memory import ConversationTokenBufferMemory
+ from langchain.prompts import (
+     ChatPromptTemplate,
+     PromptTemplate,
+     SystemMessagePromptTemplate,
+     HumanMessagePromptTemplate,
+ )
+ from langchain.prompts.prompt import PromptTemplate
+ from langchain.chains.qa_with_sources import load_qa_with_sources_chain
+ from langchain.callbacks import get_openai_callback
+ from chainlit import user_session
+ from prompts import load_query_gen_prompt, load_spark_prompt
+ from chainlit import on_message, on_chat_start
+ import openai
+ from langchain.callbacks import ContextCallbackHandler
+ from promptwatch import PromptWatch
+
+ index_name = "spark"
+
+ spark = load_spark_prompt()
+ query_gen_prompt = load_query_gen_prompt()
+ CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(query_gen_prompt)
+ pinecone.init(
+     api_key=os.environ.get("PINECONE_API_KEY"),
+     environment='us-west1-gcp',
+ )
+ @on_chat_start
+ def init():
+     token = os.environ["CONTEXT_TOKEN"]
+     context_callback = ContextCallbackHandler(token)
+     llm = ChatOpenAI(temperature=0.7, verbose=True, openai_api_key=os.environ.get("OPENAI_API_KEY"), streaming=True,
+                      callbacks=[context_callback])
+     memory = ConversationTokenBufferMemory(llm=llm, memory_key="chat_history", return_messages=True, input_key='question', max_token_limit=1000)
+     embeddings = CohereEmbeddings(model='embed-english-light-v2.0', cohere_api_key=os.environ.get("COHERE_API_KEY"))
+
+     docsearch = Pinecone.from_existing_index(
+         index_name=index_name, embedding=embeddings
+     )
+     retriever = docsearch.as_retriever(search_kwargs={"k": 4})
+     messages = [SystemMessagePromptTemplate.from_template(spark)]
+     # print('mem', user_session.get('memory'))
+     messages.append(HumanMessagePromptTemplate.from_template("{question}"))
+     prompt = ChatPromptTemplate.from_messages(messages)
+
+     question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=True)
+     doc_chain = load_qa_with_sources_chain(llm, chain_type="stuff", verbose=True, prompt=prompt)
+
+     chain = ConversationalRetrievalChain(
+         retriever=retriever,
+         question_generator=question_generator,
+         combine_docs_chain=doc_chain,
+         verbose=True,
+         memory=memory,
+         rephrase_question=False,
+         callbacks=[context_callback]
+     )
+     cl.user_session.set("conversation_chain", chain)
+
+
+ @on_message
+ async def main(message: str):
+     with PromptWatch(api_key=os.environ.get("PROMPTWATCH_KEY")) as pw:
+         token = os.environ["CONTEXT_TOKEN"]
+         context_callback = ContextCallbackHandler(token)
+         chain = cl.user_session.get("conversation_chain")
+         res = await chain.arun({"question": message}, callbacks=[cl.AsyncLangchainCallbackHandler(),
+                                                                  context_callback])
+         # Send the answer and the text elements to the UI
+         await cl.Message(content=res).send()
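The pattern worth noting in spark.py is session scoping: the chain is built once per chat in `@on_chat_start`, stored in `cl.user_session`, and fetched on every message, so each user gets isolated memory. A minimal sketch of just that pattern, with a hypothetical `EchoChain` standing in for the real `ConversationalRetrievalChain` so it runs without any external services:

```python
# Minimal sketch of the session-scoped chain pattern used in app/spark.py.
import chainlit as cl
from chainlit import on_message, on_chat_start

class EchoChain:
    """Hypothetical stand-in for ConversationalRetrievalChain."""
    async def arun(self, inputs, callbacks=None):
        return f"You asked: {inputs['question']}"

@on_chat_start
def init():
    # One chain per chat session, so per-user memory never leaks across chats.
    cl.user_session.set("conversation_chain", EchoChain())

@on_message
async def main(message: str):
    chain = cl.user_session.get("conversation_chain")
    res = await chain.arun({"question": message})
    await cl.Message(content=res).send()
```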
chainlit.md ADDED
@@ -0,0 +1,13 @@
+ # Welcome to SPARK! ⚡
+
+ Hi there! 👋 SPARK is your Smart Prompt Assistant and Resource Knowledgebase. I'm here to help you navigate the exciting world of prompt engineering 💻😊
+ Whether you need help setting the context, refining your desired outcome, or encouraging detailed responses, I've got you covered.
+ ### Data Sources 📚
+ SPARK has access to the following sources:
+ - **Brex's Prompt Engineering Guide:** [Brex's introduction to language models and prompt engineering](https://github.com/brexhq/prompt-engineering)
+ - **promptingguide.ai:** [A prompt engineering guide that demonstrates many techniques](https://www.promptingguide.ai)
+ - **OpenAI Cookbook:** [Techniques to improve reliability: a slightly dated (Sep 2022) review of techniques for prompting language models](https://github.com/openai/openai-cookbook/blob/main/techniques_to_improve_reliability.md)
+ - **learnprompting.org:** [An introductory course to prompt engineering](https://learnprompting.org/)
+ - **Lil'Log Prompt Engineering:** [An OpenAI researcher's review of the prompt engineering literature (as of March 2023)](https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/)
+
+ Feel free to ask any questions, seek guidance, or request prompt examples to accelerate your learning and prompt writing process. Let's dive into the fascinating world of prompt engineering and unlock the full potential of AI models together!
docker-compose.yml ADDED
@@ -0,0 +1,10 @@
+ version: '3'
+ services:
+   spark:
+     image: spark:latest
+     build: ./app
+     command: chainlit run app/spark.py
+     volumes:
+       - ./app/:/app/app
+     ports:
+       - 8000:8000
notebooks/chainlit_pinecone_demo.py ADDED
@@ -0,0 +1,81 @@
+ import os
+ from langchain.embeddings.cohere import CohereEmbeddings
+ from langchain.vectorstores import Pinecone
+ from langchain.chains import RetrievalQAWithSourcesChain
+ from langchain.chat_models import ChatOpenAI
+ import pinecone
+ import chainlit as cl
+
+ pinecone.init(
+     api_key=os.environ.get("PINECONE_API_KEY"),
+     environment=os.environ.get("PINECONE_ENV"),
+ )
+
+
+ index_name = "spark"
+
+ # Optional
+ namespace = None
+
+ embeddings = CohereEmbeddings(model='embed-english-light-v2.0', cohere_api_key=os.environ.get("COHERE_API_KEY"))
+
+ welcome_message = "Welcome to the Chainlit Pinecone demo! Ask anything about documents you vectorized and stored in your Pinecone DB."
+
+
+ @cl.langchain_factory(use_async=True)
+ async def langchain_factory():
+     await cl.Message(content=welcome_message).send()
+     docsearch = Pinecone.from_existing_index(
+         index_name=index_name, embedding=embeddings, namespace=namespace
+     )
+
+     chain = RetrievalQAWithSourcesChain.from_chain_type(
+         ChatOpenAI(temperature=0, streaming=True, verbose=True),
+         chain_type="stuff",
+         retriever=docsearch.as_retriever(max_tokens_limit=4097),
+         return_source_documents=True,
+         verbose=True
+     )
+     return chain
+
+
+ @cl.langchain_postprocess
+ async def process_response(res):
+     answer = res["answer"]
+     sources = res.get("sources", "").strip()  # Use the get method with a default value
+     source_elements = []
+     docs = res.get("source_documents", None)
+
+     print('sources', sources)
+     if docs:
+         metadatas = [doc.metadata for doc in docs]
+         # Get the source names from the metadata
+         all_sources = [m["source"] for m in metadatas]
+
+         if sources:
+             found_sources = []
+             # For each source mentioned by the LLM
+             for source_index, source in enumerate(sources.split(",")):
+                 # Remove the periods and any whitespace
+                 orig_source_name = source.strip().replace(".", "")
+                 # The name that will be displayed in the UI
+                 clean_source_name = f"source {source_index}"
+                 try:
+                     # Find the mentioned source in the list of all sources
+                     found_index = all_sources.index(orig_source_name)
+                 except ValueError:
+                     continue
+                 # Get the text from the source document
+                 text = docs[found_index].page_content
+
+                 found_sources.append(clean_source_name)
+                 source_elements.append(cl.Text(content=text, name=clean_source_name))
+
+             if found_sources:
+                 # Add the sources to the answer, referencing the text elements
+                 answer += f"\nSources: {', '.join(found_sources)}"
+             else:
+                 answer += "\nNo sources found"
+
+     # Send the answer and the text elements to the UI
+     await cl.Message(content=answer, elements=source_elements).send()
notebooks/conversational_qa_sources.ipynb ADDED
@@ -0,0 +1,851 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.embeddings import CohereEmbeddings\n",
+ "import openai\n",
+ "import os\n",
+ "from langchain.chat_models import ChatOpenAI\n",
+ "from langchain.chains import ConversationalRetrievalChain, LLMChain\n",
+ "from langchain.prompts import (\n",
+ " ChatPromptTemplate,\n",
+ " PromptTemplate,\n",
+ " SystemMessagePromptTemplate,\n",
+ " AIMessagePromptTemplate,\n",
+ " HumanMessagePromptTemplate,\n",
+ ")\n",
+ "from langchain.schema import (\n",
+ " AIMessage,\n",
+ " HumanMessage,\n",
+ " SystemMessage\n",
+ ")\n",
+ "from langchain.chains.qa_with_sources import load_qa_with_sources_chain"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "embeddings = CohereEmbeddings(model='embed-english-light-v2.0',cohere_api_key='S2OvOpIZ7i7G7dxIVatArwmwAHPFrKsa3T0Nazsa')\n",
+ "# initialize llm\n",
+ "llm = ChatOpenAI(max_tokens=800, temperature=0.7, verbose=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.vectorstores import Pinecone\n",
+ "import pinecone\n",
+ "\n",
+ "# initialize pinecone\n",
+ "pinecone.init(\n",
+ " api_key='eb7c9063-12ff-477c-b318-d88120a7e019', # find at app.pinecone.io\n",
+ " environment='us-west1-gcp', # next to api key in console,\n",
+ ")\n",
+ "\n",
+ "index_name = \"spark\"\n",
+ "\n",
+ "docsearch = Pinecone.from_existing_index(index_name, embeddings)\n",
+ "retriever = docsearch.as_retriever(search_kwargs={\"k\": 4})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.memory import ConversationTokenBufferMemory\n",
+ "memory = ConversationTokenBufferMemory(llm=llm,memory_key=\"chat_history\", return_messages=True,input_key='question',max_token_limit=1000)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.prompts.prompt import PromptTemplate\n",
+ "\n",
+ "_template = \"\"\"Below is a summary of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. Generate a search query based on the conversation and the new question.\n",
+ "\n",
+ "Chat History:\n",
+ "{chat_history}\n",
+ "\n",
+ "Question:\n",
+ "{question}\n",
+ "\n",
+ "Search query:\n",
+ "\"\"\"\n",
+ "CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "spark = \"\"\"You are SPARK, a Prompt Engineering Assistant created by Conversational AI Developer - Amogh Agastya (https://amagastya.com).\n",
+ "SPARK stands for Smart Prompt Assistant and Resource Knowledgebase.\n",
+ "\n",
+ "You are an AI-powered assistant that exudes a friendly and knowledgeable persona. You are designed to be a reliable and trustworthy guide in the\n",
+ "world of prompt engineering. With a passion for prompt optimization and a deep understanding of AI models, SPARK is committed to helping users navigate the field of prompt engineering and craft\n",
+ "high-performing prompts.\n",
+ "\n",
+ "Personality:\n",
+ "Intelligent: SPARK is highly knowledgeable about prompt engineering concepts and practices. It possesses a vast array of information and resources to share with users, making it an expert in its field.\n",
+ "\n",
+ "Patient: SPARK understands that prompt engineering can be complex and requires careful attention to detail. It patiently guides users through the intricacies of crafting prompts, offering support at every step.\n",
+ "\n",
+ "Adaptable: SPARK recognizes that prompt engineering is a dynamic field with evolving best practices. It stays up to date with the latest trends and developments, adapting its knowledge and recommendations accordingly.\n",
+ "\n",
+ "Interactions with SPARK:\n",
+ "Users can engage with SPARK by seeking advice on prompt design, exploring prompt engineering concepts, discussing challenges they encounter, and receiving recommendations for improving AI model performance. SPARK responds promptly, providing clear and concise explanations, examples, and actionable tips.\n",
+ "\n",
+ "Important:\n",
+ "Answer with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. If asking a clarifying question to the user would help, ask the question. \n",
+ "ALWAYS return a \"SOURCES\" part in your answer, except for small-talk conversations.\n",
+ "\n",
+ "Example: Which state/country's law governs the interpretation of the contract?\n",
+ "=========\n",
+ "Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n",
+ "Source: htps://agreement.com/page1\n",
+ "Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\\n\\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\\n\\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n",
+ "Source: htps://agreement.com/page2\n",
+ "=========\n",
+ "FINAL ANSWER: This Agreement is governed by English law.\n",
+ "SOURCES: - htps://agreement.com/page1, - htps://agreement.com/page2\n",
+ "Note: Return all the source URLs present within the sources.\n",
+ "\n",
+ "Question: {question}\n",
+ "Sources:\n",
+ "---------------------\n",
+ " {summaries}\n",
+ "---------------------\n",
+ "\n",
+ "The sources above are NOT related to the conversation with the user. Ignore the sources if user is engaging in small talk.\n",
+ "DO NOT return any sources if the conversation is just chit-chat/small talk. Return ALL the source URLs if conversation is not small talk.\n",
+ "\n",
+ "Chat History:\n",
+ "{chat_history}\n",
+ "\"\"\"\n",
+ "system_message_prompt = SystemMessagePromptTemplate.from_template(spark)\n",
+ "# instruction = HumanMessagePromptTemplate.from_template(prompt_template)\n",
+ "chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.retrievers import ContextualCompressionRetriever\n",
+ "from langchain.retrievers.document_compressors import CohereRerank"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "compressor = CohereRerank()\n",
+ "compression_retriever = ContextualCompressionRetriever(\n",
+ " base_compressor=compressor, base_retriever=retriever\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=True)\n",
+ "answer_chain = load_qa_with_sources_chain(llm, chain_type=\"stuff\", verbose=True,prompt=chat_prompt)\n",
+ "\n",
+ "chain = ConversationalRetrievalChain(\n",
+ " retriever=compression_retriever,\n",
+ " question_generator=question_generator,\n",
+ " combine_docs_chain=answer_chain,\n",
+ " verbose=True,\n",
+ " memory=memory,\n",
+ " rephrase_question=False\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "\n",
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
+ "\n",
+ "\n",
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
+ "Prompt after formatting:\n",
+ "\u001b[32;1m\u001b[1;3mBelow is a summary of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. Generate a search query based on the conversation and the new question.\n",
+ "\n",
+ "Chat History:\n",
+ "\n",
+ "Human: what is prompting? explain in detail\n",
+ "Assistant: User: Can you explain what prompting is?\n",
+ "\n",
+ "SPARK: Sure! Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task. The goal of prompting is to elicit accurate and relevant outputs from the AI model by providing clear and specific instructions. Would you like more information on prompt engineering techniques and best practices?\n",
+ "\n",
+ "Sources:\n",
+ "- https://learnprompting.org/docs/basics/prompting\n",
+ "- https://learnprompting.org/docs/category/-basics\n",
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
+ "\n",
+ "Question:\n",
+ "what is prompting? explain in detail\n",
+ "\n",
+ "Search query:\n",
+ "\u001b[0m\n",
+ "\n",
+ "\u001b[1m> Finished chain.\u001b[0m\n",
+ "\n",
+ "\n",
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
+ "\n",
+ "\n",
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
+ "Prompt after formatting:\n",
+ "\u001b[32;1m\u001b[1;3mSystem: You are SPARK, a Prompt Engineering Assistant created by Conversational AI Developer - Amogh Agastya (https://amagastya.com).\n",
+ "SPARK stands for Smart Prompt Assistant and Resource Knowledgebase.\n",
+ "\n",
+ "You are an AI-powered assistant that exudes a friendly and knowledgeable persona. You are designed to be a reliable and trustworthy guide in the\n",
+ "world of prompt engineering. With a passion for prompt optimization and a deep understanding of AI models, SPARK is committed to helping users navigate the field of prompt engineering and craft\n",
+ "high-performing prompts.\n",
+ "\n",
+ "Personality:\n",
+ "Intelligent: SPARK is highly knowledgeable about prompt engineering concepts and practices. It possesses a vast array of information and resources to share with users, making it an expert in its field.\n",
+ "\n",
+ "Patient: SPARK understands that prompt engineering can be complex and requires careful attention to detail. It patiently guides users through the intricacies of crafting prompts, offering support at every step.\n",
+ "\n",
+ "Adaptable: SPARK recognizes that prompt engineering is a dynamic field with evolving best practices. It stays up to date with the latest trends and developments, adapting its knowledge and recommendations accordingly.\n",
+ "\n",
+ "Interactions with SPARK:\n",
+ "Users can engage with SPARK by seeking advice on prompt design, exploring prompt engineering concepts, discussing challenges they encounter, and receiving recommendations for improving AI model performance. SPARK responds promptly, providing clear and concise explanations, examples, and actionable tips.\n",
+ "\n",
+ "Important:\n",
+ "Answer with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. If asking a clarifying question to the user would help, ask the question. \n",
+ "ALWAYS return a \"SOURCES\" part in your answer, except for small-talk conversations.\n",
+ "\n",
+ "Example: Which state/country's law governs the interpretation of the contract?\n",
+ "=========\n",
+ "Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n",
+ "Source: htps://agreement.com/page1\n",
+ "Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n",
+ "\n",
+ "11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n",
+ "\n",
+ "11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n",
+ "Source: htps://agreement.com/page2\n",
+ "=========\n",
+ "FINAL ANSWER: This Agreement is governed by English law.\n",
+ "SOURCES: - htps://agreement.com/page1, - htps://agreement.com/page2\n",
+ "Note: Return all the source URLs present within the sources.\n",
+ "\n",
+ "Question: what is prompting? explain in detail\n",
+ "Source:\n",
+ "---------------------\n",
+ " Content: process that requires a lot of experimentation to get optimal results. Using a simple playground from OpenAI or Cohere is a good starting point.\n",
+ "You can start with simple prompts and keep adding more elements and context as you aim for better results. Iterating your prompt along the way is vital for this reason. As you read the guide, you will see many examples where specificity, simplicity, and conciseness will often give you better results.\n",
+ "When you have a big task that involves many different subtasks, you can try to break down the task into simpler subtasks and keep building up as you get better results. This avoids adding too much complexity to the prompt design process at the beginning.\n",
+ "The Instruction\n",
+ "You can design effective prompts for various simple tasks by using commands to instruct the model what you want to achieve, such as \"Write\", \"Classify\", \"Summarize\", \"Translate\", \"Order\", etc.\n",
+ "Keep in mind that you also need to experiment a lot to see what works best. Try different instructions with different keywords, contexts, and data and see what works best for your particular use case and task. Usually, the more specific and relevant the context is to the task you are trying to perform, the better. We will touch on the importance of sampling and adding more context in the upcoming guides.\n",
+ "Others recommend that you place instructions at the beginning of the prompt. Another recommendation is to use some clear separator like \"###\" to separate the instruction and context.\n",
+ "For instance:\n",
+ "Prompt:\n",
+ "### Instruction ###\n",
+ "Translate the text below to Spanish:\n",
+ "Text: \"hello!\"\n",
+ "Output:\n",
+ "¡Hola!\n",
+ "Specificity\n",
+ "Be very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results. This is particularly important when you have a desired outcome or style of generation you are seeking. There aren't specific tokens or keywords that lead to better results. It's more important to have a good format and descriptive prompt. In fact, providing examples in the prompt is very effective to get desired output in specific formats.\n",
+ "When designing prompts, you should also keep in mind the length of the prompt as there are limitations regarding how long the prompt can be. Thinking about how specific and detailed you should be. Including too many unnecessary details is not necessarily a good approach. The details should be relevant and contribute to the task at hand.\n",
+ "Source: http://promptingguide.ai//introduction/tips\n",
+ "\n",
+ "Content: achieve different tasks and introduce key concepts along the way. Often, the best way to learn concepts is by going through examples. The few examples below illustrate how you can use well-crafted prompts to perform different types of tasks.\n",
+ "Topics:\n",
+ "Text Summarization\n",
+ "Information Extraction\n",
+ "Question Answering\n",
+ "Text Classification\n",
+ "Conversation\n",
+ "Code Generation\n",
+ "Reasoning\n",
+ "Text Summarization\n",
+ "One of the standard tasks in natural language generation is text summarization. Text summarization can include many different flavors and domains. In fact, one of the most promising applications of language models is the ability to summarize articles and concepts into quick and easy-to-read summaries. Let's try a basic summarization task using prompts.\n",
+ "Let's say you are interested to learn about antibiotics, you could try a prompt like this:\n",
+ "Prompt:\n",
+ "Explain antibiotics\n",
+ "A:\n",
+ "Output:\n",
+ "Antibiotics are a type of medication used to treat bacterial infections. They work by either killing the bacteria or preventing them from reproducing, allowing the body’s immune system to fight off the infection. Antibiotics are usually taken orally in the form of pills, capsules, or liquid solutions, or sometimes administered intravenously. They are not effective against viral infections, and using them inappropriately can lead to antibiotic resistance.\n",
+ "The \"A:\" is an explicit prompt format that you use in question answering. You used it here to tell the model that there is an answer expected further. In this example, it's not clear how this is useful vs not using it but we will leave it that for later examples. Let's just assume that this is too much information and you want to summarize it further. In fact, you can instruct the model to summarize into one sentence like so:\n",
+ "Prompt:\n",
+ "Antibiotics are a type of medication used to treat bacterial infections. They work by either killing the bacteria or preventing them from reproducing, allowing the body’s immune system to fight off the infection. Antibiotics are usually taken orally in the form of pills, capsules, or liquid solutions, or sometimes administered intravenously. They are not effective against viral infections, and using them inappropriately can lead to antibiotic resistance.\n",
+ "Explain the above in one sentence:\n",
+ "Output:\n",
+ "Antibiotics are medications used to treat bacterial infections by either killing the bacteria or stopping them from reproducing, but they are not effective against viruses and overuse\n",
+ "Source: http://promptingguide.ai//introduction/examples\n",
+ "\n",
+ "Content: and include other details such as context, inputs, or examples. You can use these elements to instruct the model better and as a result get better results.\n",
+ "Let's get started by going over a basic example of a simple prompt:\n",
+ "Prompt\n",
+ "The sky is\n",
+ "Output:\n",
+ "blue\n",
+ "The sky is blue on a clear day. On a cloudy day, the sky may be gray or white.\n",
+ "As you can see, the language model outputs a continuation of strings that make sense given the context \"The sky is\". The output might be unexpected or far from the task you want to accomplish.\n",
+ "This basic example also highlights the necessity to provide more context or instructions on what specifically you want to achieve.\n",
+ "Let's try to improve it a bit:\n",
+ "Prompt:\n",
+ "Complete the sentence: The sky is\n",
+ "Output:\n",
+ "so beautiful today.\n",
+ "Is that better? Well, you told the model to complete the sentence so the result looks a lot better as it follows exactly what you told it to do (\"complete the sentence\"). This approach of designing optimal prompts to instruct the model to perform a task is what's referred to as prompt engineering.\n",
+ "The example above is a basic illustration of what's possible with LLMs today. Today's LLMs are able to perform all kinds of advanced tasks that range from text summarization to mathematical reasoning to code generation.\n",
+ "Prompt Formatting\n",
+ "You have tried a very simple prompt above. A standard prompt has the following format:\n",
+ "<Question>?\n",
+ "or\n",
+ "<Instruction>\n",
+ "You can format this into a question answering (QA) format, which is standard in a lot of QA datasets, as follows:\n",
+ "Q: <Question>?\n",
+ "A: When prompting like the above, it's also referred to as zero-shot prompting, i.e., you are directly prompting the model for a response without any examples or demonstrations about the task you want it to achieve. Some large language models do have the ability to perform zero-shot prompting but it depends on the complexity and knowledge of the task at hand.\n",
+ "Given the standard format above, one popular and effective technique to prompting is referred to as few-shot prompting where you provide exemplars (i.e., demonstrations). You can format few-shot prompts as follows:\n",
+ "<Question>?\n",
+ "<Answer>\n",
+ "<Question>?\n",
+ "<Answer>\n",
+ "<Question>?\n",
+ "Source: http://promptingguide.ai//introduction/basics\n",
+ "---------------------\n",
+ "\n",
+ "The sources above are NOT related to the conversation with the user. Ignore the sources if user is engaging in small talk.\n",
+ "DO NOT return any sources if the conversation is just chit-chat/small talk. Return ALL the source URLs if conversation is not small talk.\n",
+ "\n",
+ "Chat History:\n",
+ "\n",
+ "Human: what is prompting? explain in detail\n",
+ "Assistant: User: Can you explain what prompting is?\n",
+ "\n",
+ "SPARK: Sure! Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task. The goal of prompting is to elicit accurate and relevant outputs from the AI model by providing clear and specific instructions. Would you like more information on prompt engineering techniques and best practices?\n",
+ "\n",
+ "Sources:\n",
+ "- https://learnprompting.org/docs/basics/prompting\n",
+ "- https://learnprompting.org/docs/category/-basics\n",
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
+ "\u001b[0m\n",
+ "\n",
+ "\u001b[1m> Finished chain.\u001b[0m\n",
+ "\n",
+ "\u001b[1m> Finished chain.\u001b[0m\n",
+ "\n",
+ "\u001b[1m> Finished chain.\u001b[0m\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "{'question': 'what is prompting? explain in detail',\n",
+ " 'chat_history': [HumanMessage(content='what is prompting? explain in detail', additional_kwargs={}, example=False),\n",
+ " AIMessage(content='User: Can you explain what prompting is?\\n\\nSPARK: Sure! Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task. The goal of prompting is to elicit accurate and relevant outputs from the AI model by providing clear and specific instructions. Would you like more information on prompt engineering techniques and best practices?\\n\\nSources:\\n- https://learnprompting.org/docs/basics/prompting\\n- https://learnprompting.org/docs/category/-basics\\n- https://learnprompting.org/docs/intermediate/self_consistency', additional_kwargs={}, example=False),\n",
+ " HumanMessage(content='what is prompting? explain in detail', additional_kwargs={}, example=False),\n",
+ " AIMessage(content=\"Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\\n\\nThe key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\\n\\nPrompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\\n\\nSome best practices in prompt engineering include:\\n1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\\n2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\\n3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\\n4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\\n5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\\n\\nWould you like more information on prompt engineering techniques and best practices?\\n\\nSources:\\n- https://learnprompting.org/docs/basics/prompting\\n- https://learnprompting.org/docs/category/-basics\\n- https://learnprompting.org/docs/intermediate/self_consistency\", additional_kwargs={}, example=False)],\n",
+ " 'answer': \"Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\\n\\nThe key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\\n\\nPrompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\\n\\nSome best practices in prompt engineering include:\\n1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\\n2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\\n3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\\n4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\\n5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\\n\\nWould you like more information on prompt engineering techniques and best practices?\\n\\nSources:\\n- https://learnprompting.org/docs/basics/prompting\\n- https://learnprompting.org/docs/category/-basics\\n- https://learnprompting.org/docs/intermediate/self_consistency\"}"
+ ]
+ },
+ "execution_count": 48,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "query = \"what is prompting? explain in detail\"\n",
+ "result = chain({\"question\": query})\n",
+ "result"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "\n",
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
+ "\n",
+ "\n",
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
+ "Prompt after formatting:\n",
+ "\u001b[32;1m\u001b[1;3mBelow is a summary of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. Generate a search query based on the conversation and the new question.\n",
+ "\n",
+ "Chat History:\n",
+ "\n",
+ "Human: what is prompting? explain in detail\n",
+ "Assistant: User: Can you explain what prompting is?\n",
+ "\n",
+ "SPARK: Sure! Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task. The goal of prompting is to elicit accurate and relevant outputs from the AI model by providing clear and specific instructions. Would you like more information on prompt engineering techniques and best practices?\n",
+ "\n",
+ "Sources:\n",
+ "- https://learnprompting.org/docs/basics/prompting\n",
420
+ "- https://learnprompting.org/docs/category/-basics\n",
421
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
422
+ "Human: what is prompting? explain in detail\n",
423
+ "Assistant: Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\n",
424
+ "\n",
425
+ "The key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\n",
426
+ "\n",
427
+ "Prompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\n",
428
+ "\n",
429
+ "Some best practices in prompt engineering include:\n",
430
+ "1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\n",
431
+ "2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\n",
432
+ "3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\n",
433
+ "4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\n",
434
+ "5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\n",
435
+ "\n",
436
+ "Would you like more information on prompt engineering techniques and best practices?\n",
437
+ "\n",
438
+ "Sources:\n",
439
+ "- https://learnprompting.org/docs/basics/prompting\n",
440
+ "- https://learnprompting.org/docs/category/-basics\n",
441
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
442
+ "\n",
443
+ "Question:\n",
444
+ "What did I just ask?\n",
445
+ "\n",
446
+ "Search query:\n",
447
+ "\u001b[0m\n",
448
+ "\n",
449
+ "\u001b[1m> Finished chain.\u001b[0m\n",
450
+ "\n",
451
+ "\n",
452
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
453
+ "\n",
454
+ "\n",
455
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
456
+ "Prompt after formatting:\n",
457
+ "\u001b[32;1m\u001b[1;3mSystem: You are SPARK, a Prompt Engineering Assistant created by Conversational AI Developer - Amogh Agastya (https://amagastya.com).\n",
458
+ "SPARK stands for Smart Prompt Assistant and Resource Knowledgebase.\n",
459
+ "\n",
460
+ "You are an AI-powered assistant that exudes a friendly and knowledgeable persona. You are designed to be a reliable and trustworthy guide in the\n",
461
+ "world of prompt engineering. With a passion for prompt optimization and a deep understanding of AI models, SPARK is committed to helping users navigate the field of prompt engineering and craft\n",
462
+ "high-performing prompts.\n",
463
+ "\n",
464
+ "Personality:\n",
465
+ "Intelligent: SPARK is highly knowledgeable about prompt engineering concepts and practices. It possesses a vast array of information and resources to share with users, making it an expert in its field.\n",
466
+ "\n",
467
+ "Patient: SPARK understands that prompt engineering can be complex and requires careful attention to detail. It patiently guides users through the intricacies of crafting prompts, offering support at every step.\n",
468
+ "\n",
469
+ "Adaptable: SPARK recognizes that prompt engineering is a dynamic field with evolving best practices. It stays up to date with the latest trends and developments, adapting its knowledge and recommendations accordingly.\n",
470
+ "\n",
471
+ "Interactions with SPARK:\n",
472
+ "Users can engage with SPARK by seeking advice on prompt design, exploring prompt engineering concepts, discussing challenges they encounter, and receiving recommendations for improving AI model performance. SPARK responds promptly, providing clear and concise explanations, examples, and actionable tips.\n",
473
+ "\n",
474
+ "Important:\n",
475
+ "Answer with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. If asking a clarifying question to the user would help, ask the question. \n",
476
+ "ALWAYS return a \"SOURCES\" part in your answer, except for small-talk conversations.\n",
477
+ "\n",
478
+ "Example: Which state/country's law governs the interpretation of the contract?\n",
479
+ "=========\n",
480
+ "Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n",
481
+ "Source: htps://agreement.com/page1\n",
482
+ "Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n",
483
+ "\n",
484
+ "11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n",
485
+ "\n",
486
+ "11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n",
487
+ "Source: htps://agreement.com/page2\n",
488
+ "=========\n",
489
+ "FINAL ANSWER: This Agreement is governed by English law.\n",
490
+ "SOURCES: - htps://agreement.com/page1, - htps://agreement.com/page2\n",
491
+ "Note: Return all the source URLs present within the sources.\n",
492
+ "\n",
493
+ "Question: What did I just ask?\n",
494
+ "Source:\n",
495
+ "---------------------\n",
496
+ " Content: Shin, T., Razeghi, Y., Logan IV, R. L., Wallace, E., & Singh, S. (2020). AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts. Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP). https://doi.org/10.18653/v1/2020.emnlp-main.346\n",
497
+ "↩This paragraph is from https://en.wikipedia.org/wiki/Snow_in_Florida↩Edit this pagePrevious🟢 IntroductionNext🟢 Giving InstructionsCopyright © 2023 Learn Prompting.\n",
498
+ "Source: https://learnprompting.org/docs/basics/prompting\n",
499
+ "\n",
500
+ "Content: 😃 Basics | Learn Prompting: Your Guide to Communicating with AI\n",
501
+ "Skip to main contentLearn PromptingEnglishEnglishEspañolFrançais日本語Português简体中文한국어සිංහලРусскийالعربيةChange LogSearch⌘KWelcome😃 Basics🟢 Introduction🟢 Prompting🟢 Giving Instructions🟢 Role Prompting🟢 Few shot prompting🟢 Combining Techniques🟢 Formalizing Prompts🟢 Chatbot Basics🟢 Pitfalls of LLMs🟢 LLM Settings🟢 Understanding AI Minds🟢 Starting Your Journey💼 Basic Applications🧙‍♂️ Intermediate🧪 Applied Prompting🚀 Advanced Applications⚖️ Reliability🖼️ Image Prompting🔓 Prompt Hacking🔨 Tooling💪 Prompt Tuning🎲 Miscellaneous📙 Vocabulary Reference📚 Bibliography📦 Prompted Products🛸 Additional Resources✨ Credits🔥 Hot Topics😃 Basics😃 BasicsIntro to PE and simple PE techniques.📄️ 🟢 IntroductionThis chapter introduces simple prompting techniques as well as terminology. In order to understand prompting/prompt engineering, you first need to understand some very basic AI concepts. If you already know about the below topics, feel free to skip ahead to the next article.📄️ 🟢 PromptingIn the previous chapter, we discussed AI and how humans can instruct AIs to perform tasks.📄️ 🟢 Giving InstructionsOne of the simplest prompting methods is just giving instructions (sometimes called instruction prompting)(@efrat2020turking)(@mishra2022reframing). We already saw a simple instruction📄️ 🟢 Role PromptingAnother prompting technique is to assign a role to the AI. For example, your📄️ 🟢 Few shot promptingYet another prompting strategy is few shot prompting(@logan-iv-etal-2022-cutting)(@lake2015human), which is basically just showing the model a few examples (called\n",
502
+ "Source: https://learnprompting.org/docs/category/-basics\n",
503
+ "\n",
504
+ "Content: 🟢 Starting Your Journey | Learn Prompting: Your Guide to Communicating with AI\n",
505
+ "Skip to main contentLearn PromptingEnglishEnglishEspañolFrançais日本語Português简体中文한국어සිංහලРусскийالعربيةChange LogSearch⌘KWelcome😃 Basics🟢 Introduction🟢 Prompting🟢 Giving Instructions🟢 Role Prompting🟢 Few shot prompting🟢 Combining Techniques🟢 Formalizing Prompts🟢 Chatbot Basics🟢 Pitfalls of LLMs🟢 LLM Settings🟢 Understanding AI Minds🟢 Starting Your Journey💼 Basic Applications🧙‍♂️ Intermediate🧪 Applied Prompting🚀 Advanced Applications⚖️ Reliability🖼️ Image Prompting🔓 Prompt Hacking🔨 Tooling💪 Prompt Tuning🎲 Miscellaneous📙 Vocabulary Reference📚 Bibliography📦 Prompted Products🛸 Additional Resources✨ Credits🔥 Hot Topics😃 Basics🟢 Starting Your JourneyOn this page🟢 Starting Your JourneyNow that you have learned about the basics of prompt engineering, you are ready to start prompt engineering on your own. The rest of this course will contain additional techniques and resources, but the best way of learning PE is to start experimenting with your own prompts. This page will show you how to get started with solving an arbitrary prompt engineering problem.Let's say you want a prompt that will generate an immersive, interactive story with ChatGPT.Step 1: Research​The first thing to do is research the prompt you want. Aside from Googling, the following resources are very valuable:Awesome ChatGPT PromptsFlowGPTr/PromptDesign (and similar subreddits)Learn Prompting Discord (and other relevant Discords, including the OpenAI Discord)You might find this one by Heroj04 to be a good starting point:I want you to act as a text based adventure game. I will type commands and you will reply with a description of what the character sees. I want you to only reply with\n",
506
+ "Source: https://learnprompting.org/docs/basics/starting_your_journey\n",
507
+ "---------------------\n",
508
+ "\n",
509
+ "The sources above are NOT related to the conversation with the user. Ignore the sources if user is engaging in small talk.\n",
510
+ "DO NOT return any sources if the conversation is just chit-chat/small talk. Return ALL the source URLs if conversation is not small talk.\n",
511
+ "\n",
512
+ "Chat History:\n",
513
+ "\n",
514
+ "Human: what is prompting? explain in detail\n",
515
+ "Assistant: User: Can you explain what prompting is?\n",
516
+ "\n",
517
+ "SPARK: Sure! Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task. The goal of prompting is to elicit accurate and relevant outputs from the AI model by providing clear and specific instructions. Would you like more information on prompt engineering techniques and best practices?\n",
518
+ "\n",
519
+ "Sources:\n",
520
+ "- https://learnprompting.org/docs/basics/prompting\n",
521
+ "- https://learnprompting.org/docs/category/-basics\n",
522
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
523
+ "Human: what is prompting? explain in detail\n",
524
+ "Assistant: Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\n",
525
+ "\n",
526
+ "The key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\n",
527
+ "\n",
528
+ "Prompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\n",
529
+ "\n",
530
+ "Some best practices in prompt engineering include:\n",
531
+ "1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\n",
532
+ "2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\n",
533
+ "3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\n",
534
+ "4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\n",
535
+ "5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\n",
536
+ "\n",
537
+ "Would you like more information on prompt engineering techniques and best practices?\n",
538
+ "\n",
539
+ "Sources:\n",
540
+ "- https://learnprompting.org/docs/basics/prompting\n",
541
+ "- https://learnprompting.org/docs/category/-basics\n",
542
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
543
+ "\u001b[0m\n",
544
+ "\n",
545
+ "\u001b[1m> Finished chain.\u001b[0m\n",
546
+ "\n",
547
+ "\u001b[1m> Finished chain.\u001b[0m\n",
548
+ "\n",
549
+ "\u001b[1m> Finished chain.\u001b[0m\n",
550
+ "{'answer': \"Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\\n\\nThe key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\\n\\nPrompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\\n\\nSome best practices in prompt engineering include:\\n1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\\n2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\\n3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\\n4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\\n5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\\n\\nWould you like more information on prompt engineering techniques and best practices?\\n\\nSources:\\n- [Learn Prompting: Prompting Basics](https://learnprompting.org/docs/basics/prompting)\\n- [Learn Prompting: Basics Category](https://learnprompting.org/docs/category/-basics)\\n- [Learn Prompting: Intermediate - Self-Consistency](https://learnprompting.org/docs/intermediate/self_consistency)\"}\n"
551
+ ]
552
+ }
553
+ ],
554
+ "source": [
555
+ "query = \"What did I just ask?\"\n",
556
+ "result = chain({\"question\": query}, return_only_outputs=True)\n",
557
+ "print(result)"
558
+ ]
559
+ },
560
+ {
561
+ "attachments": {},
562
+ "cell_type": "markdown",
563
+ "metadata": {},
564
+ "source": [
565
+ "Clear memory if it exceeds context length"
566
+ ]
567
+ },
568
+ {
569
+ "cell_type": "code",
570
+ "execution_count": 42,
571
+ "metadata": {},
572
+ "outputs": [],
573
+ "source": [
574
+ "memory.clear()"
575
+ ]
576
+ },
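The `memory.clear()` call in the cell above is a hard reset of the conversation buffer. If the goal is only to keep the prompt within the model's context window, a summarizing buffer can handle this automatically. Below is a minimal sketch, assuming LangChain's `ConversationSummaryBufferMemory` from the 0.0.x line pinned in `pyproject.toml`; the parameter values are illustrative, not the notebook's actual settings.

```python
# A minimal sketch (not the notebook's exact setup): instead of manually
# clearing memory when it outgrows the context window, a summarizing buffer
# condenses older turns automatically once max_token_limit is exceeded.
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryBufferMemory

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

memory = ConversationSummaryBufferMemory(
    llm=llm,                    # used to summarize evicted turns
    max_token_limit=1000,       # illustrative token budget
    memory_key="chat_history",  # must match the prompt's history variable
    return_messages=True,
)

# memory.clear() remains available as a hard reset, as in the cell above.
```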
577
+ {
578
+ "cell_type": "code",
579
+ "execution_count": 50,
580
+ "metadata": {},
581
+ "outputs": [
582
+ {
583
+ "name": "stdout",
584
+ "output_type": "stream",
585
+ "text": [
586
+ "\n",
587
+ "\n",
588
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
589
+ "\n",
590
+ "\n",
591
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
592
+ "Prompt after formatting:\n",
593
+ "\u001b[32;1m\u001b[1;3mBelow is a summary of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base. Generate a search query based on the conversation and the new question.\n",
594
+ "\n",
595
+ "Chat History:\n",
596
+ "\n",
597
+ "Assistant: User: Can you explain what prompting is?\n",
598
+ "\n",
599
+ "SPARK: Sure! Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task. The goal of prompting is to elicit accurate and relevant outputs from the AI model by providing clear and specific instructions. Would you like more information on prompt engineering techniques and best practices?\n",
600
+ "\n",
601
+ "Sources:\n",
602
+ "- https://learnprompting.org/docs/basics/prompting\n",
603
+ "- https://learnprompting.org/docs/category/-basics\n",
604
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
605
+ "Human: what is prompting? explain in detail\n",
606
+ "Assistant: Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\n",
607
+ "\n",
608
+ "The key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\n",
609
+ "\n",
610
+ "Prompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\n",
611
+ "\n",
612
+ "Some best practices in prompt engineering include:\n",
613
+ "1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\n",
614
+ "2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\n",
615
+ "3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\n",
616
+ "4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\n",
617
+ "5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\n",
618
+ "\n",
619
+ "Would you like more information on prompt engineering techniques and best practices?\n",
620
+ "\n",
621
+ "Sources:\n",
622
+ "- https://learnprompting.org/docs/basics/prompting\n",
623
+ "- https://learnprompting.org/docs/category/-basics\n",
624
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
625
+ "Human: What did I just ask?\n",
626
+ "Assistant: Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\n",
627
+ "\n",
628
+ "The key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\n",
629
+ "\n",
630
+ "Prompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\n",
631
+ "\n",
632
+ "Some best practices in prompt engineering include:\n",
633
+ "1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\n",
634
+ "2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\n",
635
+ "3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\n",
636
+ "4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\n",
637
+ "5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\n",
638
+ "\n",
639
+ "Would you like more information on prompt engineering techniques and best practices?\n",
640
+ "\n",
641
+ "Sources:\n",
642
+ "- [Learn Prompting: Prompting Basics](https://learnprompting.org/docs/basics/prompting)\n",
643
+ "- [Learn Prompting: Basics Category](https://learnprompting.org/docs/category/-basics)\n",
644
+ "- [Learn Prompting: Intermediate - Self-Consistency](https://learnprompting.org/docs/intermediate/self_consistency)\n",
645
+ "\n",
646
+ "Question:\n",
647
+ "do u remember our conversation?\n",
648
+ "\n",
649
+ "Search query:\n",
650
+ "\u001b[0m\n",
651
+ "\n",
652
+ "\u001b[1m> Finished chain.\u001b[0m\n",
653
+ "\n",
654
+ "\n",
655
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
656
+ "\n",
657
+ "\n",
658
+ "\u001b[1m> Entering new chain...\u001b[0m\n",
659
+ "Prompt after formatting:\n",
660
+ "\u001b[32;1m\u001b[1;3mSystem: You are SPARK, a Prompt Engineering Assistant created by Conversational AI Developer - Amogh Agastya (https://amagastya.com).\n",
661
+ "SPARK stands for Smart Prompt Assistant and Resource Knowledgebase.\n",
662
+ "\n",
663
+ "You are an AI-powered assistant that exudes a friendly and knowledgeable persona. You are designed to be a reliable and trustworthy guide in the\n",
664
+ "world of prompt engineering. With a passion for prompt optimization and a deep understanding of AI models, SPARK is committed to helping users navigate the field of prompt engineering and craft\n",
665
+ "high-performing prompts.\n",
666
+ "\n",
667
+ "Personality:\n",
668
+ "Intelligent: SPARK is highly knowledgeable about prompt engineering concepts and practices. It possesses a vast array of information and resources to share with users, making it an expert in its field.\n",
669
+ "\n",
670
+ "Patient: SPARK understands that prompt engineering can be complex and requires careful attention to detail. It patiently guides users through the intricacies of crafting prompts, offering support at every step.\n",
671
+ "\n",
672
+ "Adaptable: SPARK recognizes that prompt engineering is a dynamic field with evolving best practices. It stays up to date with the latest trends and developments, adapting its knowledge and recommendations accordingly.\n",
673
+ "\n",
674
+ "Interactions with SPARK:\n",
675
+ "Users can engage with SPARK by seeking advice on prompt design, exploring prompt engineering concepts, discussing challenges they encounter, and receiving recommendations for improving AI model performance. SPARK responds promptly, providing clear and concise explanations, examples, and actionable tips.\n",
676
+ "\n",
677
+ "Important:\n",
678
+ "Answer with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. If asking a clarifying question to the user would help, ask the question. \n",
679
+ "ALWAYS return a \"SOURCES\" part in your answer, except for small-talk conversations.\n",
680
+ "\n",
681
+ "Example: Which state/country's law governs the interpretation of the contract?\n",
682
+ "=========\n",
683
+ "Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n",
684
+ "Source: htps://agreement.com/page1\n",
685
+ "Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n",
686
+ "\n",
687
+ "11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n",
688
+ "\n",
689
+ "11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n",
690
+ "Source: htps://agreement.com/page2\n",
691
+ "=========\n",
692
+ "FINAL ANSWER: This Agreement is governed by English law.\n",
693
+ "SOURCES: - htps://agreement.com/page1, - htps://agreement.com/page2\n",
694
+ "Note: Return all the source URLs present within the sources.\n",
695
+ "\n",
696
+ "Question: do u remember our conversation?\n",
697
+ "Source:\n",
698
+ "---------------------\n",
699
+ " Content: including tips, applications, limitations, papers, and additional reading materials.\n",
700
+ "⚠️This section is under heavy development.\n",
701
+ "Topics:\n",
702
+ "ChatGPT Introduction\n",
703
+ "Reviewing The Conversation Task\n",
704
+ "Conversations with ChatGPT\n",
705
+ "ChatGPT Introduction\n",
706
+ "ChatGPT is a new model trained by OpenAI (opens in a new tab) that has the capability to interact in a conversational way. This model is trained to follow instructions in a prompt to provide appropriate responses in the context of a dialogue. ChatGPT can help with answering questions, suggesting recipes, writing lyrics in a certain style, generating code, and much more.\n",
707
+ "ChatGPT is trained using Reinforcement Learning from Human Feedback (RLHF). While this model is a lot more capable than previous GPT iterations (and also trained to reduce harmful and untruthful outputs), it still comes with limitations. Let's cover some of the capabilities and limitations with concrete examples.\n",
708
+ "You can use the research preview of ChatGPT here but for the examples below we will use the Chat mode on the OpenAI Playground.\n",
709
+ "Reviewing The Conversation Task\n",
710
+ "In one of the previous guides, we covered a bit about conversation capabilities and role prompting. We covered how to instruct the LLM to have a conversation in a specific style, with a specific intent, behavior, and identity.\n",
711
+ "Let's review our previous basic example where we created a conversational system that's able to generate more technical and scientific responses to questions.\n",
712
+ "Prompt:\n",
713
+ "The following is a conversation with an AI research assistant. The assistant tone is technical and scientific.\n",
714
+ "Human: Hello, who are you?\n",
715
+ "AI: Greeting! I am an AI research assistant. How can I help you today?\n",
716
+ "Human: Can you tell me about the creation of black holes?\n",
717
+ "AI:\n",
718
+ "From the example above, you can see two important components:\n",
719
+ "the intent or explanation of what the chatbot is\n",
720
+ "the identity which instructs the style or tone the chatbot will use to respond\n",
721
+ "The simple example above works well with the text completion APIs that uses text-davinci-003. More recently, OpenAI announced the ChatGPT APIs (opens in a new tab), which is a more powerful and cheaper model called gpt-3.5-turbo was specifically built for this type of functionality (chat completions). In fact, OpenAI recommends this\n",
722
+ "Source: http://promptingguide.ai//models/chatgpt\n",
723
+ "\n",
724
+ "Content: 2022). Discovering Language Model Behaviors with Model-Written Evaluations.\n",
725
+ "↩Su, H., Kasai, J., Wu, C. H., Shi, W., Wang, T., Xin, J., Zhang, R., Ostendorf, M., Zettlemoyer, L., Smith, N. A., & Yu, T. (2022). Selective Annotation Makes Language Models Better Few-Shot Learners.\n",
726
+ "↩Izacard, G., Lewis, P., Lomeli, M., Hosseini, L., Petroni, F., Schick, T., Dwivedi-Yu, J., Joulin, A., Riedel, S., & Grave, E. (2022). Atlas: Few-shot Learning with Retrieval Augmented Language Models.\n",
727
+ "↩Wang, B., Feng, C., Nair, A., Mao, M., Desai, J., Celikyilmaz, A., Li, H., Mehdad, Y., & Radev, D. (2022). STRUDEL: Structured Dialogue Summarization for Dialogue Comprehension.\n",
728
+ "↩Significant-Gravitas. (2023). Auto-GPT. https://news.agpt.co/\n",
729
+ "↩Nakajima, Y. (2023). Baby AGI. https://github.com/yoheinakajima/babyagi\n",
730
+ "↩Reworkd.ai. (2023). AgentGPT. https://github.com/reworkd/AgentGPT\n",
731
+ "↩Beurer-Kellner, L., Fischer, M., & Vechev, M. (2022). Prompting Is Programming: A Query Language For Large Language Models.\n",
732
+ "↩Ratner, N., Levine, Y., Belinkov, Y., Ram, O., Abend, O., Karpas, E., Shashua, A., Leyton-Brown, K., & Shoham, Y. (2022). Parallel Context Windows Improve In-Context Learning of Large Language Models.\n",
733
+ "↩White, J., Fu, Q., Hays, S., Sandborn, M., Olea, C., Gilbert, H., Elnashar, A., Spencer-Smith, J., & Schmidt, D. C. (2023). A Prompt\n",
734
+ "Source: https://learnprompting.org/docs/bibliography\n",
735
+ "\n",
736
+ "Content: Few-shot Learners for Prognostic Prediction (opens in a new tab) (Feb 2023)\n",
737
+ "STA: Self-controlled Text Augmentation for Improving Text Classifications (opens in a new tab) (Feb 2023)\n",
738
+ "Check Your Facts and Try Again: Improving Large Language Models with External Knowledge and Automated Feedback (opens in a new tab) (Feb 2023)\n",
739
+ "How Generative AI models such as ChatGPT can be (Mis)Used in SPC Practice, Education, and Research? An Exploratory Study (opens in a new tab) (Feb 2023)\n",
740
+ "Grimm in Wonderland: Prompt Engineering with Midjourney to Illustrate Fairytales (opens in a new tab) (Feb 2023)\n",
741
+ "LabelPrompt: Effective Prompt-based Learning for Relation Classification (opens in a new tab) (Feb 2023)\n",
742
+ "Language Model Crossover: Variation through Few-Shot Prompting (opens in a new tab) (Feb 2023)\n",
743
+ "Prompt Tuning of Deep Neural Networks for Speaker-adaptive Visual Speech Recognition (opens in a new tab) (Feb 2023)\n",
744
+ "The Capacity for Moral Self-Correction in Large Language Models (opens in a new tab) (Feb 2023)\n",
745
+ "Prompting for Multimodal Hateful Meme Classification (opens in a new tab) (Feb 2023)\n",
746
+ "PLACES: Prompting Language Models for Social Conversation Synthesis (opens in a new tab) (Feb 2023)\n",
747
+ "Toolformer: Language Models Can Teach Themselves to Use Tools (opens in a new tab) (Feb 2023)\n",
748
+ "Commonsense-Aware Prompting for Controllable Empathetic Dialogue Generation (opens in a new tab) (Feb 2023)\n",
749
+ "Crawling the Internal Knowledge-Base of Language Models (opens in a new tab) (Jan 2023)\n",
750
+ "Legal Prompt Engineering for Multilingual Legal Judgement Prediction (opens in a new tab) (Dec 2022)\n",
751
+ "Investigating Prompt Engineering in Diffusion Models (opens in a new tab) (Nov 2022)\n",
752
+ "Learn to Explain: Multimodal Reasoning via Thought Chains for Science Question Answering (opens in a new tab) (Sep 2022)\n",
753
+ "Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using Natural Language (opens in a new tab) (Oct 2022)\n",
754
+ "Pil\n",
755
+ "Source: http://promptingguide.ai//papers\n",
756
+ "---------------------\n",
757
+ "\n",
758
+ "The sources above are NOT related to the conversation with the user. Ignore the sources if user is engaging in small talk.\n",
759
+ "DO NOT return any sources if the conversation is just chit-chat/small talk. Return ALL the source URLs if conversation is not small talk.\n",
760
+ "\n",
761
+ "Chat History:\n",
762
+ "\n",
763
+ "Assistant: User: Can you explain what prompting is?\n",
764
+ "\n",
765
+ "SPARK: Sure! Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task. The goal of prompting is to elicit accurate and relevant outputs from the AI model by providing clear and specific instructions. Would you like more information on prompt engineering techniques and best practices?\n",
766
+ "\n",
767
+ "Sources:\n",
768
+ "- https://learnprompting.org/docs/basics/prompting\n",
769
+ "- https://learnprompting.org/docs/category/-basics\n",
770
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
771
+ "Human: what is prompting? explain in detail\n",
772
+ "Assistant: Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\n",
773
+ "\n",
774
+ "The key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\n",
775
+ "\n",
776
+ "Prompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\n",
777
+ "\n",
778
+ "Some best practices in prompt engineering include:\n",
779
+ "1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\n",
780
+ "2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\n",
781
+ "3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\n",
782
+ "4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\n",
783
+ "5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\n",
784
+ "\n",
785
+ "Would you like more information on prompt engineering techniques and best practices?\n",
786
+ "\n",
787
+ "Sources:\n",
788
+ "- https://learnprompting.org/docs/basics/prompting\n",
789
+ "- https://learnprompting.org/docs/category/-basics\n",
790
+ "- https://learnprompting.org/docs/intermediate/self_consistency\n",
791
+ "Human: What did I just ask?\n",
792
+ "Assistant: Prompting is the process of providing instructions or input to an AI model to guide its behavior and generate desired outputs. It involves crafting a prompt, which can be a simple instruction or a more complex set of instructions, and feeding it to the AI model. The model then uses the prompt to generate a response or perform a specific task.\n",
793
+ "\n",
794
+ "The key to effective prompting is to provide clear and specific instructions that help the model understand the desired outcome. This can involve specifying the format of the response, providing examples or demonstrations, or offering additional context to guide the model's understanding.\n",
795
+ "\n",
796
+ "Prompt engineering is an important aspect of optimizing AI model performance. It involves experimenting with different prompts, iterating on them, and fine-tuning them to achieve the desired results. By refining and optimizing prompts, developers can improve the accuracy, relevance, and coherence of the model's outputs.\n",
797
+ "\n",
798
+ "Some best practices in prompt engineering include:\n",
799
+ "1. Specificity: Being very specific about the instruction and task you want the model to perform. The more descriptive and detailed the prompt is, the better the results.\n",
800
+ "2. Simplicity: Keeping the prompt concise and avoiding unnecessary complexity. Including only relevant details that contribute to the task at hand.\n",
801
+ "3. Context: Providing relevant context or examples to help the model understand the desired outcome and generate more accurate responses.\n",
802
+ "4. Iteration: Iterating on the prompt design and experimenting with different instructions, keywords, and data to find the most effective approach for a specific use case or task.\n",
803
+ "5. Self-Consistency: Ensuring that the prompt is internally consistent and aligns with the model's capabilities and limitations.\n",
804
+ "\n",
805
+ "Would you like more information on prompt engineering techniques and best practices?\n",
806
+ "\n",
807
+ "Sources:\n",
808
+ "- [Learn Prompting: Prompting Basics](https://learnprompting.org/docs/basics/prompting)\n",
809
+ "- [Learn Prompting: Basics Category](https://learnprompting.org/docs/category/-basics)\n",
810
+ "- [Learn Prompting: Intermediate - Self-Consistency](https://learnprompting.org/docs/intermediate/self_consistency)\n",
811
+ "\u001b[0m\n",
812
+ "\n",
813
+ "\u001b[1m> Finished chain.\u001b[0m\n",
814
+ "\n",
815
+ "\u001b[1m> Finished chain.\u001b[0m\n",
816
+ "\n",
817
+ "\u001b[1m> Finished chain.\u001b[0m\n",
818
+ "{'answer': \"I'm sorry, but as an AI, I don't have the capability to remember past conversations. Is there something specific you would like to discuss or ask about? I'm here to help!\"}\n"
819
+ ]
820
+ }
821
+ ],
822
+ "source": [
823
+ "query = \"do u remember our conversation?\"\n",
824
+ "result = chain({\"question\": query}, return_only_outputs=True)\n",
825
+ "print(result)"
826
+ ]
827
+ }
828
+ ],
829
+ "metadata": {
830
+ "kernelspec": {
831
+ "display_name": "Python 3",
832
+ "language": "python",
833
+ "name": "python3"
834
+ },
835
+ "language_info": {
836
+ "codemirror_mode": {
837
+ "name": "ipython",
838
+ "version": 3
839
+ },
840
+ "file_extension": ".py",
841
+ "mimetype": "text/x-python",
842
+ "name": "python",
843
+ "nbconvert_exporter": "python",
844
+ "pygments_lexer": "ipython3",
845
+ "version": "3.9.13"
846
+ },
847
+ "orig_nbformat": 4
848
+ },
849
+ "nbformat": 4,
850
+ "nbformat_minor": 2
851
+ }
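For readers following the logs above: the chain's two visible steps — condensing the chat history and follow-up question into a standalone search query, then answering from the retrieved sources — match LangChain's `ConversationalRetrievalChain` pattern. The chain's construction is not part of this diff, so the sketch below is a hedged reconstruction: the index name, credentials, and QA-prompt wiring are assumptions, with only the condense prompt copied from the formatted prompt shown in the output.

```python
# Hedged reconstruction of the pattern seen in the notebook output above;
# the index name, keys, and prompt wiring are illustrative, not from the diff.
import pinecone
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationSummaryBufferMemory
from langchain.prompts import PromptTemplate
from langchain.vectorstores import Pinecone

pinecone.init(api_key="YOUR_PINECONE_KEY", environment="YOUR_PINECONE_ENV")
vectorstore = Pinecone.from_existing_index("spark", OpenAIEmbeddings())

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
memory = ConversationSummaryBufferMemory(
    llm=llm, max_token_limit=1000,
    memory_key="chat_history", return_messages=True,
)

# Step 1: rewrite (chat history + follow-up) into a standalone search query.
# This template mirrors the "Generate a search query..." prompt in the logs.
condense_prompt = PromptTemplate.from_template(
    "Below is a summary of the conversation so far, and a new question asked "
    "by the user that needs to be answered by searching in a knowledge base. "
    "Generate a search query based on the conversation and the new question.\n\n"
    "Chat History:\n{chat_history}\n\nQuestion:\n{question}\n\nSearch query:\n"
)

# Step 2: answer from the retrieved documents. The SPARK persona and the
# "ALWAYS return a SOURCES part" instruction seen above would be supplied to
# this step, e.g. via combine_docs_chain_kwargs={"prompt": qa_prompt}.
chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=vectorstore.as_retriever(),
    condense_question_prompt=condense_prompt,
    memory=memory,
)

result = chain({"question": "what is prompting? explain in detail"})
print(result["answer"])
```

Note the design trade-off the logs make visible: because the memory replays full answers (including the SOURCES block) into the condense step, the history grows quickly — which is why the summary-buffer cap or an explicit `memory.clear()` matters.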
notebooks/index_data.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.poetry]
2
+ name = "spark"
3
+ version = "0.1.0"
4
+ description = ""
5
+ authors = ["Amogh Agastya <hello@amagastya.com>"]
6
+ readme = "README.md"
7
+
8
+ [tool.poetry.dependencies]
9
+ python = "^3.9"
10
+ chainlit = "0.6.0"
11
+ langchain = "^0.0.234"
12
+ openai = "^0.27.8"
13
+ pinecone-client = "^2.2.2"
14
+ cohere = "^4.13.1"
15
+ tiktoken = "^0.4.0"
16
+
17
+
18
+ [build-system]
19
+ requires = ["poetry-core"]
20
+ build-backend = "poetry.core.masonry.api"
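Given this Poetry manifest, a typical local workflow would be to install the pinned dependencies and launch the Chainlit app. The commands below are a sketch under the assumption that the Chainlit entrypoint lives at `app/spark.py`; adjust the path to your checkout.

```bash
# Illustrative local workflow; the script path is an assumption,
# not pinned anywhere in this manifest.
poetry install
poetry run chainlit run app/spark.py
```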