feat: added the bot
app.py CHANGED
@@ -1,63 +1,143 @@
(removed: the previous 63-line app.py; only its closing lines are visible in this view)
- )
- demo.launch()

(added: the new app.py)

# -*- coding: utf-8 -*-
"""lastbot.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1opFBOe_TDrO_j2naogZ3uT8KGTa-fdLT
"""

#!pip install pypdf
## Embedding
#!pip install sentence_transformers
#!pip install llama_index

"""Import three key components from LlamaIndex, namely:
VectorStoreIndex, SimpleDirectoryReader, and ServiceContext.
"""

import llama_index
import huggingface_hub

from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import HuggingFaceLLM
from llama_index.prompts.prompts import SimpleInputPrompt

"""
---
"""

#!pip uninstall langchain llama_index -y
#!pip install langchain==0.0.150 llama_index==0.5.1

documents=SimpleDirectoryReader(input_files=["/content/sample_data/data/insurance-2030-the-impact-of-ai-on-the-future-of-insurance-f.pdf"]).load_data()
documents

"""I will build a system prompt template; Llama 2 needs this to control the context and to manage the response."""

system_prompt="""
You are a Q&A assistant. Your goal is to answer questions as
accurately as possible based on the instructions and context provided.
"""
## Default prompt format supported by Llama 2
query_wrapper_prompt=SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")

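# (Illustrative note, not part of the original notebook.) The wrapper above
# turns a raw question into the single-turn chat format the model is prompted
# with, so a user query reaches Llama 2 roughly as:
#   <|USER|>What will insurance look like in 2030?<|ASSISTANT|>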
"""Log in to Hugging Face and obtain an access token; this is required to connect to the Hugging Face APIs."""

huggingface_hub.login()

"""Next, we would load the Llama 2 model, and for this I will use the torch framework."""

import torch

llm = HuggingFaceLLM(
    context_window=4096,
    max_new_tokens=256,
    generate_kwargs={"temperature": 0.0, "do_sample": False},
    system_prompt=system_prompt,
    query_wrapper_prompt=query_wrapper_prompt,
    tokenizer_name="meta-llama/Llama-2-7b-chat-hf",
    model_name="meta-llama/Llama-2-7b-chat-hf",
    device_map="auto",
    # reduces memory usage when running on CUDA (half precision + 8-bit loading)
    model_kwargs={"torch_dtype": torch.float16, "load_in_8bit": True}
)

"""llm = HuggingFaceLLM(...): Creates an instance of the LLM object.
context_window=4096: Defines how much context (4096 tokens) the LLM considers when generating responses.
max_new_tokens=256: Limits the maximum number of tokens the LLM generates per response (256).
generate_kwargs={"temperature": 0.0, "do_sample": False}: Controls how creative the LLM is with its responses. A low temperature and no sampling make its output more predictable and deterministic.
system_prompt, query_wrapper_prompt: Templates used to guide the LLM's understanding of the context and how to respond.
tokenizer_name, model_name: Specify the Llama 2 model and tokenizer from Hugging Face.
device_map="auto": Automatically uses the GPU if available, otherwise the CPU.
model_kwargs: Reduces memory usage on a GPU (CUDA) by using half-precision floats (torch.float16) and loading the model in 8-bit format.

It is time to build the embeddings using LlamaIndex.
"""

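# (Illustrative sketch, not part of the original notebook.) A quick sanity
# check that the weights loaded and the model generates text; HuggingFaceLLM
# exposes a complete() method that returns a printable completion.
sanity_check = llm.complete("In one sentence, what is retrieval-augmented generation?")
print(sanity_check)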
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import ServiceContext
from llama_index.embeddings import LangchainEmbedding

embed_model=LangchainEmbedding(
    HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2"))

"""from langchain.embeddings.huggingface import HuggingFaceEmbeddings: This line imports the HuggingFaceEmbeddings class from the Langchain library, which allows you to use pre-trained models from the Hugging Face Hub for embedding text data.

---

from llama_index import ServiceContext: This imports the ServiceContext class from LlamaIndex, which manages and coordinates the different components of your application.

---

from llama_index.embeddings import LangchainEmbedding: This imports the LangchainEmbedding class from LlamaIndex, which provides a wrapper for using various embedding models within the LlamaIndex framework.

---

Embedding Model Configuration:
embed_model = LangchainEmbedding(...): This line creates an instance of the LangchainEmbedding class.
HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2"): This nested configuration specifies the Hugging Face embedding model to be used. In this case, it's the all-mpnet-base-v2 model from the sentence-transformers library, which is known for its good performance on sentence embedding tasks.
"""

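# (Illustrative sketch, not part of the original notebook.) Confirm the
# embedding model loads and produces vectors; all-mpnet-base-v2 returns
# 768-dimensional sentence embeddings.
example_vector = embed_model.get_text_embedding("insurance in 2030")
print(len(example_vector))  # expected: 768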
# Configure the service context

service_context=ServiceContext.from_defaults(
    chunk_size=1024,
    llm=llm,
    embed_model=embed_model
)

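# (Illustrative note, not part of the original notebook.) chunk_size=1024 means
# each document is split into roughly 1024-token chunks before embedding;
# smaller chunks give more precise retrieval, larger chunks keep more context
# per retrieved passage.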
"""Set up the vector store.

Essentially, this line transforms our text documents into numerical representations (embeddings) using the embedding model and then stores them within the index object. This allows our RAG application to efficiently find relevant documents based on user queries during conversation.
"""

index=VectorStoreIndex.from_documents(documents,service_context=service_context)

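# (Illustrative sketch, not part of the original notebook.) Optionally persist
# the index so the PDF does not have to be re-embedded on every restart;
# "./storage" is an assumed location, not one used by the original app.
index.storage_context.persist(persist_dir="./storage")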
"""Creates a "query engine" object called query_engine from our existing index object. This engine allows us to easily search and retrieve information from the dataset we built earlier.

---

We can use the query_engine to ask questions about the data stored in our index. It will use the embeddings it holds to find relevant documents and retrieve information from them.
"""

query_engine=index.as_query_engine()

"""Time to test the RAG application."""

response=query_engine.query("How would insurance be in 2030")
print(response)

"""Second test."""