Sandaruth committed on
Commit
ee0f24f
1 Parent(s): 72c011d

Upload 8 files

Browse files
Files changed (8) hide show
  1. MultiQueryRetriever.py +216 -0
  2. README.md +12 -12
  3. Retrieval.py +33 -0
  4. app.py +78 -0
  5. htmlTemplates.py +51 -0
  6. model.py +62 -0
  7. prompts.py +60 -0
  8. requirements.txt +85 -0
MultiQueryRetriever.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import logging
3
+ from typing import List, Optional, Sequence
4
+
5
+ from langchain_core.callbacks import (
6
+ AsyncCallbackManagerForRetrieverRun,
7
+ CallbackManagerForRetrieverRun,
8
+ )
9
+ from langchain_core.documents import Document
10
+ from langchain_core.language_models import BaseLanguageModel
11
+ from langchain_core.output_parsers import BaseOutputParser
12
+ from langchain_core.prompts.prompt import PromptTemplate
13
+ from langchain_core.retrievers import BaseRetriever
14
+
15
+ from langchain.chains.llm import LLMChain
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
class LineListOutputParser(BaseOutputParser[List[str]]):
    """Parse an LLM completion into a list of non-empty lines.

    The multi-query prompt asks the model for one alternative query per
    line.  Blank lines in the completion would otherwise become
    empty-string queries and trigger pointless retrieval calls, so they
    are dropped here (mirrors the upstream LangChain behavior).
    """

    def parse(self, text: str) -> List[str]:
        """Split ``text`` on newlines, discarding empty/whitespace-only lines."""
        return [line for line in text.strip().split("\n") if line.strip()]
26
+
27
+
28
# Default prompt used by MultiQueryRetriever.from_llm when no custom prompt
# is supplied.  Asks the model for 3 paraphrases of the user question, one
# per line, which LineListOutputParser then splits apart.
DEFAULT_QUERY_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""You are an AI language model assistant. Your task is
    to generate 3 different versions of the given user
    question to retrieve relevant documents from a vector database.
    By generating multiple perspectives on the user question,
    your goal is to help the user overcome some of the limitations
    of distance-based similarity search. Provide these alternative
    questions separated by newlines. Original question: {question}""",
)
39
+
40
+
41
+ def _unique_documents(documents: Sequence[Document]) -> List[Document]:
42
+ return [doc for i, doc in enumerate(documents) if doc not in documents[:i]][:4]
43
+
44
+
45
class MultiQueryRetriever(BaseRetriever):
    """Given a query, use an LLM to write a set of queries.

    Retrieve docs for each query. Return the unique union of all retrieved
    docs (deduplicated — and capped — by ``_unique_documents``).
    """

    retriever: BaseRetriever
    llm_chain: LLMChain
    verbose: bool = True
    parser_key: str = "lines"
    """DEPRECATED. parser_key is no longer used and should not be specified."""
    include_original: bool = False
    """Whether to include the original query in the list of generated queries."""

    @classmethod
    def from_llm(
        cls,
        retriever: BaseRetriever,
        llm: BaseLanguageModel,
        prompt: PromptTemplate = DEFAULT_QUERY_PROMPT,
        parser_key: Optional[str] = None,
        include_original: bool = False,
    ) -> "MultiQueryRetriever":
        """Initialize from llm using default template.

        Args:
            retriever: retriever to query documents from
            llm: llm for query generation using DEFAULT_QUERY_PROMPT
            prompt: prompt used to generate alternative queries; must take a
                ``question`` input variable.
            parser_key: DEPRECATED, ignored; kept for backward compatibility.
            include_original: Whether to include the original query in the
                list of generated queries.

        Returns:
            MultiQueryRetriever
        """
        output_parser = LineListOutputParser()
        llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser)
        return cls(
            retriever=retriever,
            llm_chain=llm_chain,
            include_original=include_original,
        )

    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Asynchronously get relevant documents given a user query.

        Args:
            query: user query

        Returns:
            Unique union of relevant documents from all generated queries
        """
        queries = await self.agenerate_queries(query, run_manager)
        if self.include_original:
            queries.append(query)
        documents = await self.aretrieve_documents(queries, run_manager)
        return self.unique_union(documents)

    async def agenerate_queries(
        self, question: str, run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[str]:
        """Generate queries based upon user input.

        Args:
            question: user query

        Returns:
            List of LLM generated queries that are similar to the user input
        """
        response = await self.llm_chain.acall(
            inputs={"question": question}, callbacks=run_manager.get_child()
        )
        # LLMChain stores the parsed output under its output key ("text");
        # with LineListOutputParser attached this is already a list of lines.
        lines = response["text"]
        if self.verbose:
            # Lazy %-args: the message is only formatted if actually emitted.
            logger.info("Generated queries: %s", lines)
        return lines

    async def aretrieve_documents(
        self, queries: List[str], run_manager: AsyncCallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Run all LLM generated queries concurrently.

        Args:
            queries: query list

        Returns:
            List of retrieved Documents (may contain duplicates)
        """
        document_lists = await asyncio.gather(
            *(
                self.retriever.aget_relevant_documents(
                    query, callbacks=run_manager.get_child()
                )
                for query in queries
            )
        )
        return [doc for docs in document_lists for doc in docs]

    def _get_relevant_documents(
        self,
        query: str,
        *,
        run_manager: CallbackManagerForRetrieverRun,
    ) -> List[Document]:
        """Get relevant documents given a user query.

        Args:
            query: user query

        Returns:
            Unique union of relevant documents from all generated queries
        """
        queries = self.generate_queries(query, run_manager)
        if self.include_original:
            queries.append(query)
        documents = self.retrieve_documents(queries, run_manager)
        return self.unique_union(documents)

    def generate_queries(
        self, question: str, run_manager: CallbackManagerForRetrieverRun
    ) -> List[str]:
        """Generate queries based upon user input.

        Args:
            question: user query

        Returns:
            List of LLM generated queries that are similar to the user input
        """
        response = self.llm_chain(
            {"question": question}, callbacks=run_manager.get_child()
        )
        lines = response["text"]
        if self.verbose:
            logger.info("Generated queries: %s", lines)
        return lines

    def retrieve_documents(
        self, queries: List[str], run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        """Run all LLM generated queries sequentially.

        Args:
            queries: query list

        Returns:
            List of retrieved Documents (may contain duplicates)
        """
        documents = []
        for query in queries:
            docs = self.retriever.get_relevant_documents(
                query, callbacks=run_manager.get_child()
            )
            documents.extend(docs)
        # Was a bare print(); route through the module logger so output is
        # configurable and doesn't pollute stdout.
        logger.debug("Retrieved %d documents before deduplication", len(documents))
        return documents

    def unique_union(self, documents: List[Document]) -> List[Document]:
        """Get unique Documents.

        Args:
            documents: List of retrieved Documents

        Returns:
            List of unique retrieved Documents (order-preserving; capped by
            ``_unique_documents``)
        """
        logger.debug("Deduplicating %d documents", len(documents))
        return _unique_documents(documents)
README.md CHANGED
@@ -1,12 +1,12 @@
1
- ---
2
- title: StockGPT
3
- emoji: 📚
4
- colorFrom: red
5
- colorTo: red
6
- sdk: streamlit
7
- sdk_version: 1.35.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ ---
2
+ title: StockGPT
3
+ emoji: 📚
4
+ colorFrom: red
5
+ colorTo: red
6
+ sdk: streamlit
7
+ sdk_version: 1.35.0
8
+ app_file: app.py
9
+ pinned: false
10
+ ---
11
+
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
Retrieval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging

from langchain.chains import RetrievalQA

from model import llm, vectorstore, splitter, embedding, QA_PROMPT
from MultiQueryRetriever import MultiQueryRetriever

# Plain retrieval-augmented QA chain: the top-4 similarity hits are
# "stuffed" directly into the QA prompt as context.
bsic_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(search_kwargs={"k": 4}),
    return_source_documents=True,
    input_key="question",
    chain_type_kwargs={"prompt": QA_PROMPT},
)

# Multi-query variant: the LLM first paraphrases the question, then the
# unique union of hits (k=3 per paraphrase) becomes the context.
retriever_from_llm = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(search_kwargs={"k": 3}),
    llm=llm,
)

multiQuery_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever_from_llm,
    return_source_documents=True,
    input_key="question",
    chain_type_kwargs={"prompt": QA_PROMPT},
)
app.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from Retrieval import bsic_chain, multiQuery_chain
import time

from htmlTemplates import css, bot_template, user_template, source_template

# set_page_config must be the first Streamlit call in the script; the chat
# bubble CSS is injected once at import time so every rerun sees it.
st.set_page_config(page_title="Chat with StockGPT", page_icon=":currency_exchange:")
st.write(css, unsafe_allow_html=True)
9
+
10
def main():
    """Render the StockGPT chat UI and handle one question/answer turn."""
    # Set up the layout --------------------------------------------------------------
    st.sidebar.title("Guideline")
    st.sidebar.markdown("""
    1. Type your message in the chat box on the right.
    2. Hit Enter or click the send button to send your message.
    3. Chat bot responses will appear below.
    4. Source documents will be displayed in the sidebar.
    """)

    # Dropdown to select model --------------------------------------------------------
    model_selection = st.sidebar.selectbox("Select Model", ["Basic", "MultiQuery"])
    print(model_selection)  # NOTE(review): debug print — consider logging instead

    # Button to connect to Google link ------------------------------------------------
    st.sidebar.markdown('<a href="https://drive.google.com/drive/folders/13v6LsaYH9wEwvqVtlLG1U4OiUHgZ7hY4?usp=sharing" target="_blank" style="display: inline-block;'
                        'background-color: #475063; color: white; padding: 10px 20px; text-align: center;border: 1px solid white;'
                        'text-decoration: none; cursor: pointer; border-radius: 5px;">Sources</a>',
                        unsafe_allow_html=True)

    st.title("StockGPT Chat App")

    # Chat area -----------------------------------------------------------------------
    user_input = st.text_input("", key="user_input",placeholder="Type your question here...")

    # JavaScript code to submit the form on Enter key press
    # NOTE(review): Streamlit sanitizes <script> tags rendered via markdown,
    # so this snippet is almost certainly inert — confirm before relying on it.
    js_submit = f"""
    document.addEventListener("keydown", function(event) {{
        if (event.code === "Enter" && !event.shiftKey) {{
            document.querySelector(".stTextInput").dispatchEvent(new Event("submit"));
        }}
    }});
    """
    st.markdown(f'<script>{js_submit}</script>', unsafe_allow_html=True)

    if st.button("Send"):
        if user_input:
            with st.spinner('Waiting for response...'):
                # Add bot response here (you can replace this with your bot logic)
                response, metadata, source_documents = generate_bot_response(user_input, model_selection)
                st.write(user_template.replace("{{MSG}}", user_input), unsafe_allow_html=True)
                st.write(bot_template.replace("{{MSG}}", response), unsafe_allow_html=True)

                # Source documents
                st.sidebar.title("Source Documents")
                for i, doc in enumerate(source_documents, 1):
                    # assumes metadata["source"] holds a Windows-style path — TODO confirm
                    tit = metadata[i-1]["source"].split("\\")[-1]
                    with st.sidebar.expander(f"{tit}"):
                        st.write(doc)  # Assuming the Document object can be directly written to display its content
59
+
60
def generate_bot_response(user_input, model):
    """Run *user_input* through the selected RetrievalQA chain and time it.

    Args:
        user_input: the user's question as a plain string.
        model: "Basic" or "MultiQuery" — which chain from Retrieval to use.

    Returns:
        Tuple of (response_text, metadata_list, source_documents), where
        metadata_list holds the ``.metadata`` dict of each source document.

    Raises:
        ValueError: if *model* is not a recognized chain name.
    """
    start_time = time.time()
    print(f"User Input: {user_input}")

    if model == "Basic":
        res = bsic_chain(user_input)
    elif model == "MultiQuery":
        res = multiQuery_chain(user_input)
    else:
        # Previously an unknown selection fell through and raised a confusing
        # NameError on `res`; fail fast with a clear message instead.
        raise ValueError(f"Unknown model selection: {model!r}")

    response = res['result']
    metadata = [doc.metadata for doc in res.get("source_documents", [])]
    response_time = time.time() - start_time
    print(f"Response Time: {response_time} seconds")
    return response, metadata, res.get('source_documents', [])

if __name__ == "__main__":
    main()
htmlTemplates.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inline CSS for the chat bubbles; injected once by app.py via
# st.write(css, unsafe_allow_html=True).
css = '''
<style>
.chat-message {
    padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
}
.chat-message.user {
    background-color: #2b313e
}
.chat-message.bot {
    background-color: #475063
}
.chat-message .avatar {
  width: 20%;
}
.chat-message .avatar img {
  max-width: 78px;
  max-height: 78px;
  border-radius: 50%;
  object-fit: cover;
}
.chat-message .message {
  width: 80%;
  padding: 0 1.5rem;
  color: #fff;
}
'''

# Bot reply bubble; {{MSG}} is substituted with str.replace in app.py.
bot_template = '''
<div class="chat-message bot">
    <div class="avatar">
        <img src="https://cdn-icons-png.flaticon.com/128/4712/4712038.png">
    </div>
    <div class="message">{{MSG}}</div>
</div>
'''
# User message bubble; {{MSG}} is substituted with str.replace in app.py.
user_template = '''
<div class="chat-message user">
    <div class="avatar">
        <img src="https://cdn-icons-png.flaticon.com/512/1177/1177568.png">
    </div>
    <div class="message">{{MSG}}</div>
</div>
'''
# Source-document bubble (PDF icon); imported by app.py but apparently
# unused there at the moment.
source_template = '''
<div class="chat-message bot">
    <div class="avatar">
        <img src="https://st.depositphotos.com/1427101/4468/v/950/depositphotos_44680417-stock-illustration-pdf-paper-sheet-icons.jpg">
    </div>
    <div class="message">{{MSG}}</div>
</div>
'''
model.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

from dotenv import load_dotenv
from langchain.chat_models import ChatAnyscale
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS

from prompts import qa_template_V0, qa_template_V1, qa_template_V2

# Pull ANYSCALE_ENDPOINT_TOKEN (and any other secrets) from a local .env file.
load_dotenv()

ANYSCALE_ENDPOINT_TOKEN = os.environ.get("ANYSCALE_ENDPOINT_TOKEN")
anyscale_api_key = ANYSCALE_ENDPOINT_TOKEN

# Chat LLM served by Anyscale Endpoints; temperature=0 for deterministic answers.
llm = ChatAnyscale(
    anyscale_api_key=anyscale_api_key,
    temperature=0,
    model_name='mistralai/Mistral-7B-Instruct-v0.1',
    streaming=False,
)

# Create Embeddings: normalized BGE-large vectors (the FAISS index below was
# built with the same embedding model).
model_name = "BAAI/bge-large-en"
embedding = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    # model_kwargs = {'device':'cuda'},
    encode_kwargs={'normalize_embeddings': True},
)

# Create Splitter (used when building/rebuilding the index).
splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=100,
)

# persits_directory="./faiss_V04_C500_BGE_large_web_doc_with_split-final"
persits_directory = "./faiss_V06_C500_BGE_large-Final"

vectorstore = FAISS.load_local(persits_directory, embedding)

# Custom QA prompt (v2) shared by both RetrievalQA chains in Retrieval.py.
QA_PROMPT = PromptTemplate(
    input_variables=["context", "question"],
    template=qa_template_V2,
)
59
+
60
+
61
+
62
+
prompts.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ qa_template_V0 = ("""
2
+ You are the AI assistant of the IronOne Technologies which provide services for companies members and novice users with learning with ATrad Aplication .
3
+ You have provided context information below related to learning material.
4
+
5
+ Context: {context}
6
+
7
+ Given this information, please answer the question with the latest information.
8
+ If you dont know the answer say you dont know, dont try to makeup answers.
9
+ if context is not enough to answer the question, ask for more information.
10
+ if context is not related to the question, say I dont know.
11
+
12
+ give the answer with very clear structure and clear language.
13
+
14
+ each answer Must start with code word ATrad Ai(QA):
15
+
16
+ Question: {question}
17
+
18
+ answer: let me think about it...""")
19
+
20
+ qa_template_V1 = ("""
21
+ Welcome to IronOne Technologies' AI Assistant, designed to assist you in learning with the ATrad Application.
22
+
23
+ Context: {context}
24
+
25
+ As your AI assistant, I'm here to help you navigate through learning materials and provide guidance.
26
+ Please provide me with any questions or concerns you have regarding the ATrad Application.
27
+ If you're unsure about something or need more information, feel free to ask.
28
+
29
+ Important:-No need to mention provided document. give strictly answers.
30
+ -Give answers in a very structured manner to understand easily.
31
+
32
+ each answer Must start with code word ATrad Ai(QA):
33
+
34
+ Question: {question}
35
+
36
+ ATrad Ai(QA): Let me think about it...""")
37
+
38
+ qa_template_V2= ("""
39
+ <<SYS>>
40
+
41
+ You are the AI assistant for ATrad, which offers services to members and novice users through the ATrad Application and Online Invest platform.
42
+ ATrad is a specialized trading and analytics platform focusing on Emerging Markets, with a strong emphasis on ESG (Environmental, Social, and Governance) investments. It holds a dominant market share of 75% among member firms of the Colombo Stock Exchange in Sri Lanka.
43
+ Please refrain from providing fabricated answers. If you're unsure, simply state that you don't know and avoid adding any information not mentioned in the provided context.
44
+ If the context is unclear, ask for clarification rather than making assumptions. If a question falls outside the scope of your domain, politely indicate so.
45
+
46
+ Important : No need to mention provided document. give strictly answers. Give answers in a very structured manner to understand easily.
47
+ Important and Urgent : Explain the answers point by point and give the answer in a very structured manner to understand easily.
48
+
49
+ Ensure your responses are polite, concise, and straightforward.
50
+
51
+ If the question pertains to topics beyond ATrad Application and Online Invest platform, indicate that it's outside your area of expertise.
52
+ <</SYS>>
53
+
54
+ [INST]
55
+ <DOCUMENTS>
56
+ {context}
57
+ </DOCUMENTS>
58
+
59
+ Question : {question}[/INST]"""
60
+ )
requirements.txt ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiohttp==3.9.3
2
+ aiosignal==1.3.1
3
+ annotated-types==0.6.0
4
+ anyio==4.2.0
5
+
6
+ async-timeout==4.0.3
7
+ attrs==23.2.0
8
+ certifi==2024.2.2
9
+ charset-normalizer==3.3.2
10
+ click==8.1.7
11
+
12
+ dataclasses-json==0.6.4
13
+
14
+ distro==1.9.0
15
+
16
+ faiss-cpu==1.7.4
17
+ filelock==3.13.1
18
+ frozenlist==1.4.1
19
+ fsspec==2024.2.0
20
+ greenlet==3.0.3
21
+ h11==0.14.0
22
+ httpcore==1.0.3
23
+ httpx==0.26.0
24
+ huggingface-hub==0.20.3
25
+ idna==3.6
26
+
27
+ Jinja2==3.1.3
28
+ joblib==1.3.2
29
+ jsonpatch==1.33
30
+ jsonpointer==2.4
31
+
32
+ langchain==0.1.7
33
+ langchain-community==0.0.20
34
+ langchain-core==0.1.23
35
+ langchain-openai==0.0.6
36
+ langsmith==0.0.87
37
+ MarkupSafe==2.1.5
38
+ marshmallow==3.20.2
39
+
40
+ mpmath==1.3.0
41
+ multidict==6.0.5
42
+ mypy-extensions==1.0.0
43
+
44
+ networkx==3.2.1
45
+ nltk==3.8.1
46
+ numpy==1.26.4
47
+ openai==1.12.0
48
+
49
+ pillow==10.2.0
50
+
51
+
52
+ pydantic==2.6.1
53
+ pydantic_core==2.16.2
54
+
55
+ python-dotenv==1.0.1
56
+ # pywin32==305.1
57
+ PyYAML==6.0.1
58
+
59
+ regex==2023.12.25
60
+ requests==2.31.0
61
+ safetensors==0.4.2
62
+ scikit-learn==1.4.1.post1
63
+ scipy==1.12.0
64
+ sentence-transformers==2.3.1
65
+ sentencepiece==0.1.99
66
+
67
+ sniffio==1.3.0
68
+ SQLAlchemy==2.0.27
69
+
70
+ sympy==1.12
71
+ tenacity==8.2.3
72
+ threadpoolctl==3.3.0
73
+ tiktoken==0.6.0
74
+ tokenizers==0.15.2
75
+ torch==2.2.0
76
+ torchaudio==2.2.0
77
+ torchvision==0.17.0
78
+
79
+ tqdm==4.66.2
80
+
81
+ transformers==4.37.2
82
+ typing-inspect==0.9.0
83
+
84
+ yarl==1.9.4
85
+