Commit: a393007
Author: Omar Solano
Parent: 08b8fbf

update gpt-3.5 version
Files changed:
- notebooks/01-Basic_Tutor.ipynb +4 -4
- notebooks/02-Basic_RAG.ipynb +3 -3
- notebooks/03-RAG_with_LlamaIndex.ipynb +4 -2
- notebooks/04-RAG_with_VectorStore.ipynb +5 -2
- notebooks/05-Improve_Prompts_+_Add_Source.ipynb +2 -2
- notebooks/06-Evaluate_RAG.ipynb +3 -3
- notebooks/07-RAG_Improve_Chunking.ipynb +4 -4
- notebooks/08-Finetune_Embedding.ipynb +1 -1
- notebooks/09-Better_Embedding_Model.ipynb +3 -3
- notebooks/10-Adding_Reranking.ipynb +2 -2
- notebooks/11-Adding_Hybrid_Search.ipynb +2 -2
- notebooks/12-Improve_Query.ipynb +3 -3
- notebooks/14-Adding_Chat.ipynb +2 -2
- notebooks/15-Use_OpenSource_Models.ipynb +1 -1
- scripts/basic_tutor.py +13 -10
notebooks/01-Basic_Tutor.ipynb CHANGED
@@ -99,7 +99,7 @@
     },
     "outputs": [],
     "source": [
-    "# Defining a function to answer a question using \"gpt-3.5-turbo-
+    "# Defining a function to answer a question using \"gpt-3.5-turbo-0125\" model.\n",
     "def ask_ai_tutor(question):\n",
     "    try:\n",
     "        # Formulating the system prompt and condition the model to answer only AI-related questions.\n",
@@ -113,8 +113,8 @@
     "\n",
     "    # Call the OpenAI API\n",
     "    response = client.chat.completions.create(\n",
-    "        model='gpt-3.5-turbo-
-    "        temperature=0
+    "        model='gpt-3.5-turbo-0125',\n",
+    "        temperature=0,\n",
     "        messages=[\n",
     "            {\"role\": \"system\", \"content\": system_prompt},\n",
     "            {\"role\": \"user\", \"content\": prompt}\n",
@@ -218,7 +218,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.
+   "version": "3.11.8"
   }
  },
 "nbformat": 4,
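For reference, the patched cell in 01-Basic_Tutor.ipynb reduces to the sketch below. The pinned model string and temperature come straight from this commit; the prompt wording is a placeholder, since the diff truncates those lines.

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def ask_ai_tutor(question):
    try:
        # Placeholder prompts: the real ones are defined earlier in the notebook.
        system_prompt = "You are an AI tutor. Answer only AI-related questions."
        prompt = f"Please answer this question: {question}"

        # Call the OpenAI API with the version-pinned model.
        response = client.chat.completions.create(
            model="gpt-3.5-turbo-0125",
            temperature=0,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt},
            ],
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        return f"An error occurred: {e}"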
notebooks/02-Basic_RAG.ipynb CHANGED
@@ -533,7 +533,7 @@
     "\n",
     "    # Call the OpenAI API\n",
     "    response = client.chat.completions.create(\n",
-    "        model='gpt-3.5-turbo-
+    "        model='gpt-3.5-turbo-0125',\n",
     "        temperature=0.0,\n",
     "        messages=[\n",
     "            {\"role\": \"system\", \"content\": system_prompt},\n",
@@ -610,7 +610,7 @@
     "\n",
     "# Call the OpenAI API\n",
     "response = client.chat.completions.create(\n",
-    "    model='gpt-3.5-turbo-
+    "    model='gpt-3.5-turbo-0125',\n",
     "    temperature=.9,\n",
     "    messages=[\n",
     "        {\"role\": \"system\", \"content\": system_prompt},\n",
@@ -680,7 +680,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.
+   "version": "3.11.8"
   },
   "widgets": {
    "application/vnd.jupyter.widget-state+json": {
notebooks/03-RAG_with_LlamaIndex.ipynb CHANGED
@@ -231,9 +231,11 @@
     },
     "outputs": [],
     "source": [
+    "from llama_index.llms.openai import OpenAI\n",
     "# Define a query engine that is responsible for retrieving related pieces of text,\n",
     "# and using a LLM to formulate the final answer.\n",
-    "
+    "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)\n",
+    "query_engine = index.as_query_engine(llm=llm)"
     ]
    },
    {
@@ -290,7 +292,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.
+   "version": "3.11.8"
   }
  },
 "nbformat": 4,
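Notebooks 03 and 04 make the same move: rather than relying on LlamaIndex's default LLM, they pass an explicitly pinned model to the query engine. A minimal end-to-end sketch, assuming llama-index 0.10+ and an illustrative ./data directory:

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms.openai import OpenAI

# Build a vector index over local documents (data path is illustrative).
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)

# Pin the LLM version instead of taking the library default.
llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125", max_tokens=512)
query_engine = index.as_query_engine(llm=llm)

print(query_engine.query("What is retrieval-augmented generation?"))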
notebooks/04-RAG_with_VectorStore.ipynb CHANGED
@@ -275,9 +275,12 @@
     },
     "outputs": [],
     "source": [
+    "from llama_index.llms.openai import OpenAI\n",
     "# Define a query engine that is responsible for retrieving related pieces of text,\n",
     "# and using a LLM to formulate the final answer.\n",
-    "
+    "\n",
+    "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)\n",
+    "query_engine = index.as_query_engine(llm=llm)"
     ]
    },
    {
@@ -334,7 +337,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.
+   "version": "3.11.8"
   }
  },
 "nbformat": 4,
notebooks/05-Improve_Prompts_+_Add_Source.ipynb CHANGED
@@ -82,7 +82,7 @@
     "source": [
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
     ]
    },
    {
@@ -724,7 +724,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.
+   "version": "3.11.8"
   },
   "widgets": {
    "application/vnd.jupyter.widget-state+json": {
notebooks/06-Evaluate_RAG.ipynb CHANGED
@@ -82,7 +82,7 @@
     "source": [
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
     ]
    },
    {
@@ -589,7 +589,7 @@
     "from llama_index.core.evaluation import generate_question_context_pairs\n",
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+    "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
     "rag_eval_dataset = generate_question_context_pairs(\n",
     "    nodes,\n",
     "    llm=llm,\n",
@@ -795,7 +795,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.
+   "version": "3.11.8"
   },
   "widgets": {
    "application/vnd.jupyter.widget-state+json": {
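The evaluation notebooks (06, 07, 09, 10, 11, 15) all pin the question-generation model the same way. A sketch of that step, assuming nodes already holds the chunked documents; num_questions_per_chunk is an assumed value the diff does not show:

from llama_index.core.evaluation import generate_question_context_pairs
from llama_index.llms.openai import OpenAI

# Have the pinned model write synthetic (question, context) pairs
# for retrieval evaluation.
llm = OpenAI(model="gpt-3.5-turbo-0125")
rag_eval_dataset = generate_question_context_pairs(
    nodes,  # chunked documents produced earlier in the notebook
    llm=llm,
    num_questions_per_chunk=1,
)
rag_eval_dataset.save_json("rag_eval_dataset.json")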
notebooks/07-RAG_Improve_Chunking.ipynb CHANGED
@@ -82,7 +82,7 @@
     "source": [
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
     ]
    },
    {
@@ -758,7 +758,7 @@
     "\n",
     "index_no_metadata = VectorStoreIndex(\n",
     "    nodes=nodes_no_meta,\n",
-    "    service_context=ServiceContext.from_defaults(llm=OpenAI(model=\"gpt-3.5-turbo\")),\n",
+    "    service_context=ServiceContext.from_defaults(llm=OpenAI(model=\"gpt-3.5-turbo-0125\")),\n",
     ")"
     ]
    },
@@ -876,7 +876,7 @@
     "# Create questions for each segment. These questions will be used to\n",
     "# assess whether the retriever can accurately identify and return the\n",
     "# corresponding segment when queried.\n",
-    "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+    "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
     "rag_eval_dataset = generate_question_context_pairs(\n",
     "    nodes,\n",
     "    llm=llm,\n",
@@ -1035,7 +1035,7 @@
     "    query_engine = index.as_query_engine(similarity_top_k=i)\n",
     "\n",
     "    # While we use GPT3.5-Turbo to answer questions, we can use GPT4 to evaluate the answers.\n",
-    "    llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-
+    "    llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
     "    service_context_gpt4 = ServiceContext.from_defaults(llm=llm_gpt4)\n",
     "\n",
     "    faithfulness_evaluator = FaithfulnessEvaluator(service_context=service_context_gpt4)\n",
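Notebooks 07 and 09 also bump the judge model to gpt-4-0125-preview. A sketch of the surrounding evaluation loop, mirroring the calls visible in the hunk; the top_k sweep values and the query are illustrative:

from llama_index.core import ServiceContext
from llama_index.core.evaluation import FaithfulnessEvaluator
from llama_index.llms.openai import OpenAI

# gpt-3.5-turbo-0125 answers the questions; a stronger pinned model judges them.
llm_gpt4 = OpenAI(temperature=0, model="gpt-4-0125-preview")
service_context_gpt4 = ServiceContext.from_defaults(llm=llm_gpt4)
faithfulness_evaluator = FaithfulnessEvaluator(service_context=service_context_gpt4)

for i in [2, 4, 6]:  # illustrative similarity_top_k sweep
    query_engine = index.as_query_engine(similarity_top_k=i)
    response = query_engine.query("How are large language models trained?")
    result = faithfulness_evaluator.evaluate_response(response=response)
    print(i, result.passing)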
notebooks/08-Finetune_Embedding.ipynb CHANGED
@@ -259,7 +259,7 @@
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
     "# Load the OpenAI API with the \"gpt-3.5-turbo\" model\n",
-    "llm = OpenAI()\n",
+    "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
     "\n",
     "# Generate questions for each chunk.\n",
     "TRAIN_DATASET = generate_qa_embedding_pairs(TRAIN_NODEs, llm=llm)\n",
notebooks/09-Better_Embedding_Model.ipynb CHANGED
@@ -92,7 +92,7 @@
     "source": [
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
     ]
    },
    {
@@ -665,7 +665,7 @@
     "# Create questions for each segment. These questions will be used to\n",
     "# assess whether the retriever can accurately identify and return the\n",
     "# corresponding segment when queried.\n",
-    "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+    "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
     "rag_eval_dataset = generate_question_context_pairs(\n",
     "    nodes,\n",
     "    llm=llm,\n",
@@ -824,7 +824,7 @@
     "    query_engine = index.as_query_engine(similarity_top_k=i)\n",
     "\n",
     "    # While we use GPT3.5-Turbo to answer questions, we can use GPT4 to evaluate the answers.\n",
-    "    llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-
+    "    llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
     "    service_context_gpt4 = ServiceContext.from_defaults(llm=llm_gpt4)\n",
     "\n",
     "    faithfulness_evaluator = FaithfulnessEvaluator(service_context=service_context_gpt4)\n",
notebooks/10-Adding_Reranking.ipynb CHANGED
@@ -93,7 +93,7 @@
     "source": [
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
     ]
    },
    {
@@ -617,7 +617,7 @@
     "# Create questions for each segment. These questions will be used to\n",
     "# assess whether the retriever can accurately identify and return the\n",
     "# corresponding segment when queried.\n",
-    "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+    "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
     "rag_eval_dataset = generate_question_context_pairs(\n",
     "    nodes,\n",
     "    llm=llm,\n",
notebooks/11-Adding_Hybrid_Search.ipynb CHANGED
@@ -91,7 +91,7 @@
     "source": [
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
     ]
    },
    {
@@ -807,7 +807,7 @@
     "# Create questions for each segment. These questions will be used to\n",
     "# assess whether the retriever can accurately identify and return the\n",
     "# corresponding segment when queried.\n",
-    "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+    "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
     "rag_eval_dataset = generate_question_context_pairs(\n",
     "    nodes,\n",
     "    llm=llm,\n",
notebooks/12-Improve_Query.ipynb CHANGED
@@ -91,7 +91,7 @@
     "source": [
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
+    "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
     ]
    },
    {
@@ -501,7 +501,7 @@
     "source": [
     "from llama_index.core import ServiceContext\n",
     "\n",
-    "gpt4 = OpenAI(temperature=0, model=\"gpt-4\")\n",
+    "gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
     "service_context_gpt4 = ServiceContext.from_defaults(llm=gpt4)"
     ]
    },
@@ -766,7 +766,7 @@
     "from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform\n",
     "from llama_index.core.query_engine.multistep_query_engine import MultiStepQueryEngine\n",
     "\n",
-    "gpt3 = OpenAI(temperature=0, model=\"gpt-3.5-turbo\")\n",
+    "gpt3 = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\")\n",
     "service_context_gpt3 = ServiceContext.from_defaults(llm=gpt3)\n",
     "\n",
     "step_decompose_transform_gpt3 = StepDecomposeQueryTransform(llm=gpt3, verbose=True)\n",
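The imports in that last hunk show where the pinned gpt3 object ends up. A sketch of the multi-step wiring, assuming an existing index; the index_summary text and query are illustrative:

from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform
from llama_index.core.query_engine.multistep_query_engine import MultiStepQueryEngine
from llama_index.llms.openai import OpenAI

gpt3 = OpenAI(temperature=0, model="gpt-3.5-turbo-0125")
step_decompose_transform_gpt3 = StepDecomposeQueryTransform(llm=gpt3, verbose=True)

# Wrap a plain query engine so complex questions get decomposed into steps.
multi_step_engine = MultiStepQueryEngine(
    query_engine=index.as_query_engine(llm=gpt3),
    query_transform=step_decompose_transform_gpt3,
    index_summary="Articles about large language models",  # illustrative
)
print(multi_step_engine.query("How do LLMs compare to earlier NLP models?"))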
notebooks/14-Adding_Chat.ipynb CHANGED
@@ -91,7 +91,7 @@
     "source": [
     "from llama_index.llms.openai import OpenAI\n",
     "\n",
-    "llm = OpenAI(temperature=0
+    "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
     ]
    },
    {
@@ -740,7 +740,7 @@
     "outputs": [],
     "source": [
     "# Define GPT-4 model that will be used by the chat_engine to improve the query.\n",
-    "gpt4 = OpenAI(temperature=0.9, model=\"gpt-4\")"
+    "gpt4 = OpenAI(temperature=0.9, model=\"gpt-4-0125-preview\")"
     ]
    },
    {
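The comment in that hunk says gpt4 is handed to the chat_engine to improve (rewrite) the query. One way to wire that up in LlamaIndex, assuming the condense-question chat mode; the diff itself does not show which mode the notebook uses:

from llama_index.llms.openai import OpenAI

# GPT-4 condenses each follow-up into a standalone question before retrieval.
gpt4 = OpenAI(temperature=0.9, model="gpt-4-0125-preview")
chat_engine = index.as_chat_engine(chat_mode="condense_question", llm=gpt4)

print(chat_engine.chat("What is a large language model?"))
print(chat_engine.chat("How are they trained?"))  # resolved against chat history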
notebooks/15-Use_OpenSource_Models.ipynb CHANGED
@@ -782,7 +782,7 @@
     "# Create questions for each segment. These questions will be used to\n",
     "# assess whether the retriever can accurately identify and return the\n",
     "# corresponding segment when queried.\n",
-    "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+    "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
     "rag_eval_dataset = generate_question_context_pairs(\n",
     "    nodes,\n",
     "    llm=llm,\n",
scripts/basic_tutor.py CHANGED
@@ -3,9 +3,10 @@ import os
 from openai import OpenAI
 
 # Retrieve your OpenAI API key from the environment variables and activate the OpenAI client
-openai_api_key = os.environ.get(
+openai_api_key = os.environ.get("OPENAI_API_KEY")
 client = OpenAI(api_key=openai_api_key)
 
+
 def ask_ai_tutor(question):
 
     # Check if OpenAI key has been correctly added
@@ -25,19 +26,20 @@ def ask_ai_tutor(question):
 
         # Call the OpenAI API
         response = client.chat.completions.create(
-
-
-
-
-
-
-
+            model="gpt-3.5-turbo-0125",
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": prompt},
+            ],
+        )
+
         # Return the AI's response
         return response.choices[0].message.content.strip()
 
     except Exception as e:
         return f"An error occurred: {e}"
 
+
 def main():
     # Check if a question was provided as a command-line argument
     if len(sys.argv) != 2:
@@ -46,12 +48,13 @@ def main():
 
     # The user's question is the first command-line argument
     user_question = sys.argv[1]
-
+
     # Get the AI's response
     ai_response = ask_ai_tutor(user_question)
-
+
     # Print the AI's response
     print(f"AI Tutor says: {ai_response}")
 
+
 if __name__ == "__main__":
     main()
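After the patch, the script reads the key from OPENAI_API_KEY, pins the chat model, and answers one question passed on the command line. A usage sketch; the import path assumes the repository root is on sys.path, and the key value is a placeholder:

import os

os.environ.setdefault("OPENAI_API_KEY", "sk-...")  # placeholder key

from scripts.basic_tutor import ask_ai_tutor  # illustrative import path

# Equivalent to: python scripts/basic_tutor.py "What is a transformer?"
print(ask_ai_tutor("What is a transformer?"))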