Files changed (12)
  1. .gitattributes +0 -3
  2. .gitignore +2 -9
  3. Dockerfile +21 -88
  4. README.md +2 -17
  5. app.py +28 -0
  6. pages/1_Chatbot_AMS_Langchain.py +152 -0
  7. pages/2_Chatbot_AMS_Canopy.py +157 -0
  8. poetry.lock +0 -0
  9. prompts.py +69 -0
  10. pyproject.toml +23 -0
  11. queries.py +145 -0
  12. requirements.txt +138 -0
.gitattributes CHANGED
@@ -32,7 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.xz filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
- *.pdf filter=lfs diff=lfs merge=lfs -text
- *.jsonl filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
- data/AMS/ams_data-400-0-all.json filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
@@ -1,10 +1,3 @@
-
- .env
- *.log
- # *.pdf
- *.DS_Store
- .ragatouille/
  .venv/
- db/
- scripts/__pycache__
- scripts/tmp_trainer
+ __pycache__/
+ .env
Dockerfile CHANGED
@@ -1,101 +1,34 @@
  # Use an official Python runtime as a parent image
- FROM python:3.11.5-bookworm
+ FROM python:3.11.1

- # Do root things: clone repo and install dependencies. libsndfile1 for spotlight. libhdf5-serial-dev for vector distance.
- USER root
-
- RUN useradd -m -u 1000 user && chown -R user:user /home/user && chmod -R 777 /home/user
-
- WORKDIR /clonedir
- RUN apt-get update && \
-     apt-get install -y git
- RUN git clone --depth 1 https://github.com/dan-s-mueller/aerospace_chatbot.git .
-
- RUN apt-get update && apt-get install -y \
-     libhdf5-serial-dev \
-     libsndfile1 \
-     && rm -rf /var/lib/apt/lists/*
-
- USER user
-
- # Set home to the user's home directory
- ENV HOME=/home/user \
-     PATH=/home/user/.local/bin:$PATH
- WORKDIR $HOME
-
- # Create directories for the app code to be copied into
- RUN mkdir $HOME/app
- RUN mkdir $HOME/src
- RUN mkdir $HOME/data
- RUN mkdir $HOME/config
-
- # Give all users read/write permissions to the app code directories
- RUN chmod 777 $HOME/app
- RUN chmod 777 $HOME/src
- RUN chmod 777 $HOME/data
- RUN chmod 777 $HOME/config
-
- # Install Poetry
- RUN pip3 install poetry==1.7.1
-
- # Copy poetry files from repo into home. cp commands for non-local builds.
- # COPY --chown=user:user pyproject.toml $HOME
- RUN cp /clonedir/pyproject.toml $HOME
- RUN chown user:user $HOME/pyproject.toml
-
- # Disable virtual environment creation by Poetry, as the Docker container itself is an isolated environment
- RUN poetry config virtualenvs.in-project true
-
- # Set the path of the virtual environment
- RUN poetry config virtualenvs.path $HOME/.venv
-
- # Set environment variables
- ENV PATH="$HOME/.venv/bin:$PATH"
-
- # Install dependencies using Poetry
- RUN poetry install --no-root
-
- # Copy the rest of the application code from the cloned repo, followed by chown statements. cp commands for non-local builds.
- # COPY --chown=user:user ./src $HOME/src
- # COPY --chown=user:user ./data $HOME/data
- # COPY --chown=user:user ./config $HOME/config
- # COPY --chown=user:user ./app $HOME/app
- RUN cp -R /clonedir/src /clonedir/data /clonedir/config /clonedir/app $HOME
- RUN chown -R user:user $HOME/src $HOME/data $HOME/config $HOME/app
-
- # Set up database path and env variable. Comment out if running on Hugging Face Spaces.
- # RUN mkdir $HOME/db
- # RUN chmod 777 $HOME/db
- # ENV LOCAL_DB_PATH=$HOME/db
-
- # Set final work directory for the application
- WORKDIR $HOME/app
- RUN pwd
- RUN ls -R
-
- # Expose the port Streamlit runs on
+ # Set the working directory in the container
+ WORKDIR /app
+
+ # Install Poetry
+ # RUN pip3 install poetry==1.7.1
+
+ # Copy the current directory contents into the container at /app
+ COPY . .
+
+ # Install dependencies with Poetry
+ # RUN poetry config virtualenvs.create false \
+ #     && poetry install --no-interaction --no-ansi
+ # Streamlit must be installed separately. This may cause dependency issues in the future, but it's the only way it works for now.
+ # RUN pip3 install streamlit
+
+ # Install dependencies
+ RUN pip3 install -r requirements.txt
+
+ # Make a port available to the world outside this container.
+ # The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime. Your container needs to listen on Streamlit's (default) port 8501.
  EXPOSE 8501
- EXPOSE 9000

  # The HEALTHCHECK instruction tells Docker how to test a container to check that it is still working. Your container needs to listen on Streamlit's (default) port 8501:
  HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health

- # An ENTRYPOINT allows you to configure a container that will run as an executable.
- # Here, it also contains the entire streamlit run command for your app, so you don't have to call it from the command line.
- # Port 9000 will not be accessible from the Hugging Face Space.
- ENTRYPOINT ["streamlit", "run", "Home.py", "--server.port=8501", "--server.address=0.0.0.0"]
-
- # Run this if you're running with a terminal locally
- # ENTRYPOINT ["/bin/bash", "-c"]
-
- # To run locally
- # docker build -t aerospace-chatbot .
- # docker run --user 1000:1000 -p 8501:8501 -p 9000:9000 -it aerospace-chatbot
-
- # To run locally with a terminal
- # docker build -t aerospace-chatbot .
- # docker run --user 1000:1000 --entrypoint /bin/bash -it aerospace-chatbot
-
- # To run remotely from Hugging Face Spaces
- # docker run -it --user 1000:1000 -p 7860:7860 --platform=linux/amd64 \
- #     registry.hf.space/ai-aerospace-aerospace-chatbots:latest
+ # An ENTRYPOINT allows you to configure a container that will run as an executable. Here, it also contains the entire streamlit run command for your app, so you don't have to call it from the command line.
+ ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
+
+ # Execute with:
+ # docker build -t <image_name> .
+ # docker run -p 8501:8501 <image_name>
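Note on running the new image: the chatbot pages read PINECONE_ENVIRONMENT and PINECONE_API_KEY via os.getenv, so those values must be passed into the container at run time. A minimal sketch, assuming the image is tagged aerospace-chatbots (a placeholder name) and the keys are already exported in the shell:

# Build the image, then run it with the Pinecone secrets forwarded as env vars
docker build -t aerospace-chatbots .
docker run -p 8501:8501 \
  -e PINECONE_ENVIRONMENT="$PINECONE_ENVIRONMENT" \
  -e PINECONE_API_KEY="$PINECONE_API_KEY" \
  aerospace-chatbots

On Hugging Face Spaces the same variables are exposed to the container from the Space's secrets settings, which is why the Dockerfile itself never mentions them.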
README.md CHANGED
@@ -1,26 +1,11 @@
  ---
- title: Aerospace Chatbot, Aerospace Mechanism Symposia
+ title: Aerospace Chatbots
  emoji: 🚀
  colorFrom: gray
  colorTo: blue
  sdk: docker
  pinned: false
  app_port: 8501
- short_description: Aerospace Chatbot, AMS
- datasets:
- - ai-aerospace/ams_data_full_2000-2020
  ---

- # Aerospace Chatbot
- Aerospace discipline-specific chatbots and AI tools.
- Help docs are here: https://aerospace-chatbot.readthedocs.io/en/latest/
-
- The GitHub repository is located here: [aerospace chatbot](https://github.com/dan-s-mueller/aerospace_chatbot)
-
- ## This Deployment
- This deployment is loaded with all Aerospace Mechanism Symposia papers located in [/data/AMS/](https://github.com/dan-s-mueller/aerospace_chatbot/tree/documentation_update/data/AMS) on Hugging Face Spaces.
-
- There is persistent storage of vector databases when using RAGatouille or ChromaDB. A few databases come preloaded. * indicates the database config (OpenAI/Hugging Face, RAGatouille/ChromaDB).
-
- - *-ams-15merge-2000-2020: Database with each document representing 15 merged pages. Covers AMS years 2000-2020.
- - *-ams-nomerge-400-0-2000-2020: Database with each document representing chunks of 400 characters, no overlap, and no page merging. Covers AMS years 2000-2020.
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,28 @@
+ import streamlit as st
+ import os
+
+ # Set up page
+ st.set_page_config(
+     page_title="Aerospace Chatbot: AMS",
+ )
+ st.title("Aerospace Chatbot Homepage")
+ st.markdown("Code base: https://github.com/dsmueller3760/aerospace_chatbot")
+ st.markdown('---')
+ st.title("Chatbots")
+ st.markdown("""
+ Chatbots for the Aerospace Mechanisms Symposia, using all available papers published since 2000.
+ * Aerospace Mechanisms Chatbot, Langchain: uses langchain QA retrieval https://databutton.com/v/71z0llw3/Aerospace_Mechanisms_Chat_Bot_Langchain
+ * Aerospace Mechanisms Chatbot, Canopy: uses Pinecone's canopy tool https://databutton.com/v/71z0llw3/Aerospace_Mechanisms_Chat_Bot_Canopy
+ """)
+ st.subheader("AMS")
+ # Bare string literal: rendered as markdown by Streamlit "magic"
+ '''
+ This chatbot looks up information from all Aerospace Mechanism Symposia papers in the following location: https://github.com/dsmueller3760/aerospace_chatbot/tree/main/data/AMS
+ * Available models: https://platform.openai.com/docs/models
+ * Model parameters: https://platform.openai.com/docs/api-reference/chat/create
+ * Pinecone: https://docs.pinecone.io/docs/projects#api-keys
+ * OpenAI API: https://platform.openai.com/api-keys
+ '''
+
+ # # Establish secrets
+ # PINECONE_ENVIRONMENT=os.getenv('PINECONE_ENVIRONMENT')
+ # PINECONE_API_KEY=os.getenv('PINECONE_API_KEY')
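The commented-out block at the end shows where the Pinecone secrets are meant to come from: environment variables rather than anything hard-coded. A minimal sketch of that lookup with a guard added (the guard and its warning text are assumptions, not part of this commit):

import os
import streamlit as st

# Secrets arrive as environment variables (a local .env file or, on
# Hugging Face Spaces, the Space's secrets).
PINECONE_ENVIRONMENT = os.getenv('PINECONE_ENVIRONMENT')
PINECONE_API_KEY = os.getenv('PINECONE_API_KEY')

if not (PINECONE_ENVIRONMENT and PINECONE_API_KEY):
    st.warning('Set PINECONE_ENVIRONMENT and PINECONE_API_KEY before using the chatbot pages.')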
pages/1_Chatbot_AMS_Langchain.py ADDED
@@ -0,0 +1,152 @@
+ import os
+ import queries
+ import pinecone
+ from dotenv import load_dotenv, find_dotenv
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.llms import OpenAI
+ import streamlit as st
+ import openai
+ import time
+
+ load_dotenv(find_dotenv(), override=True)
+
+ # Set secrets
+ # PINECONE_ENVIRONMENT=db.secrets.get('PINECONE_ENVIRONMENT')
+ # PINECONE_API_KEY=db.secrets.get('PINECONE_API_KEY')
+ PINECONE_ENVIRONMENT=os.getenv('PINECONE_ENVIRONMENT')
+ PINECONE_API_KEY=os.getenv('PINECONE_API_KEY')
+
+ # Set the page title
+ st.set_page_config(
+     page_title='Aerospace Chatbot: AMS w/Langchain',
+ )
+ st.title('Aerospace Mechanisms Chatbot')
+ with st.expander('''What's under the hood?'''):
+     st.markdown('''
+     This chatbot looks up information from all Aerospace Mechanism Symposia papers in the following location: https://github.com/dsmueller3760/aerospace_chatbot/tree/main/data/AMS
+     * Source code: https://github.com/dsmueller3760/aerospace_chatbot/blob/main/scripts/setup_page_langchain.py
+     * Uses custom langchain functions with QA retrieval: https://js.langchain.com/docs/modules/chains/popular/chat_vector_db_legacy
+     * All prompts query the entire database unless 'Filter response with last received sources' is activated.
+     * **Response time ~10 seconds per prompt**.
+     ''')
+ filter_toggle=st.checkbox('Filter response with last received sources?')
+
+ # Add a sidebar for input options
+ st.title('Input')
+
+ # Add input fields in the sidebar
+ st.sidebar.title('Input options')
+ output_level = st.sidebar.selectbox('Level of Output', ['Concise', 'Detailed'], index=1)
+ k = st.sidebar.number_input('Number of items per prompt', min_value=1, step=1, value=4)
+ search_type = st.sidebar.selectbox('Search Type', ['similarity', 'mmr'], index=1)
+ temperature = st.sidebar.slider('Temperature', min_value=0.0, max_value=2.0, value=0.0, step=0.1)
+ verbose = st.sidebar.checkbox('Verbose output')
+ chain_type = st.sidebar.selectbox('Chain Type', ['stuff', 'map_reduce'], index=0)
+
+ # Vector databases
+ st.sidebar.title('Vector database')
+ index_type=st.sidebar.selectbox('Index type', ['Pinecone'], index=0)
+ index_name=st.sidebar.selectbox('Index name', ['canopy--ams'], index=0)
+
+ # Embeddings
+ st.sidebar.title('Embeddings')
+ embedding_type=st.sidebar.selectbox('Embedding type', ['Openai'], index=0)
+ embedding_name=st.sidebar.selectbox('Embedding name', ['text-embedding-ada-002'], index=0)
+
+ # Add a section for secret keys
+ st.sidebar.title('Secret keys')
+ OPENAI_API_KEY = st.sidebar.text_input('OpenAI API Key', type='password')
+
+ # Pinecone
+ pinecone.init(
+     api_key=PINECONE_API_KEY,
+     environment=PINECONE_ENVIRONMENT
+ )
+
+ if OPENAI_API_KEY:
+     openai.api_key = OPENAI_API_KEY
+     embeddings_model = OpenAIEmbeddings(model=embedding_name,openai_api_key=OPENAI_API_KEY)
+
+     # Set up chat history
+     qa_model_obj = st.session_state.get('qa_model_obj',[])
+     message_id = st.session_state.get('message_id', 0)
+
+     if 'messages' not in st.session_state:
+         st.session_state.messages = []
+     for message in st.session_state.messages:
+         with st.chat_message(message['role']):
+             st.markdown(message['content'])
+
+     # Process some items
+     if output_level == 'Concise':
+         out_token = 50
+     else:
+         out_token = 516
+
+     # Define LLM parameters and qa model object
+     llm = OpenAI(temperature=temperature,
+                  openai_api_key=OPENAI_API_KEY,
+                  max_tokens=out_token)
+     qa_model_obj=queries.QA_Model(index_name,
+                                   embeddings_model,
+                                   llm,
+                                   k,
+                                   search_type,
+                                   verbose,
+                                   filter_arg=False)
+
+     # Display assistant response in chat message container
+     if prompt := st.chat_input('Prompt here'):
+         st.session_state.messages.append({'role': 'user', 'content': prompt})
+         with st.chat_message('user'):
+             st.markdown(prompt)
+         with st.chat_message('assistant'):
+             message_placeholder = st.empty()
+
+             with st.status('Generating response...') as status:
+                 t_start=time.time()
+
+                 # Process some items
+                 if output_level == 'Concise':
+                     out_token = 50
+                 else:
+                     out_token = 516
+
+                 # Define LLM parameters and qa model object
+                 llm = OpenAI(temperature=temperature,
+                              openai_api_key=OPENAI_API_KEY,
+                              max_tokens=out_token)
+
+                 message_id += 1
+                 st.write('Message: '+str(message_id))
+
+                 if message_id>1:
+                     qa_model_obj=st.session_state['qa_model_obj']
+                     qa_model_obj.update_model(llm,
+                                               k=k,
+                                               search_type=search_type,
+                                               verbose=verbose,
+                                               filter_arg=filter_toggle)
+                     if filter_toggle:
+                         # Restrict retrieval to the sources cited in the previous answer
+                         filter_list = list(set(item['source'] for item in qa_model_obj.sources[-1]))
+                         filter_items=[]
+                         for item in filter_list:
+                             filter_item={'source': item}
+                             filter_items.append(filter_item)
+                         filter={'$or':filter_items}
+
+                 st.write('Searching vector database, generating prompt...')
+                 qa_model_obj.query_docs(prompt)
+                 ai_response=qa_model_obj.result['answer']
+                 message_placeholder.markdown(ai_response)
+                 t_delta=time.time() - t_start
+                 status.update(label='Prompt generated in '+"{:10.3f}".format(t_delta)+' seconds', state='complete', expanded=False)
+
+         st.session_state['qa_model_obj'] = qa_model_obj
+         st.session_state['message_id'] = message_id
+         st.session_state.messages.append({'role': 'assistant', 'content': ai_response})
+
+ else:
+     st.warning('No API key found. Add your API key in the sidebar under Secret Keys. Find it or create one here: https://platform.openai.com/api-keys')
+     st.info('Your API key is not stored in any form by this app. However, for transparency it is recommended to delete your API key once used.')
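For reference, the filter built under filter_toggle is a Pinecone metadata filter restricted to the 'source' field of the documents behind the previous answer. The same construction, condensed (names reused from the page above):

# qa_model_obj.sources[-1] holds the metadata dicts returned with the last answer
last_sources = {item['source'] for item in qa_model_obj.sources[-1]}

# Pinecone '$or' filter: a document matches if its 'source' equals any cited source
filter = {'$or': [{'source': s} for s in last_sources]}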
pages/2_Chatbot_AMS_Canopy.py ADDED
@@ -0,0 +1,157 @@
+ import os
+ import queries
+ import pinecone
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.llms import OpenAI
+ import streamlit as st
+ import openai
+ import time
+
+ from tqdm.auto import tqdm
+ from typing import Tuple
+
+ # from dotenv import load_dotenv,find_dotenv,dotenv_values
+ # load_dotenv(find_dotenv(),override=True)
+
+ from canopy.tokenizer import Tokenizer
+ from canopy.knowledge_base import KnowledgeBase
+ from canopy.context_engine import ContextEngine
+ from canopy.chat_engine import ChatEngine
+ from canopy.llm.openai import OpenAILLM
+ # from canopy.llm.models import ModelParams
+ from canopy.models.data_models import Document, Messages, UserMessage, AssistantMessage
+ from canopy.models.api_models import ChatResponse
+
+ def chat(new_message: str, history: Messages) -> Tuple[str, Messages, ChatResponse]:
+     """Send the new user message plus history to the chat engine; return the answer, the updated history, and the raw response."""
+     messages = history + [UserMessage(content=new_message)]
+     response = chat_engine.chat(messages)
+     assistant_response = response.choices[0].message.content
+     return assistant_response, messages + [AssistantMessage(content=assistant_response)], response
+
+ # Set secrets
+ # PINECONE_ENVIRONMENT=db.secrets.get('PINECONE_ENVIRONMENT')
+ # PINECONE_API_KEY=db.secrets.get('PINECONE_API_KEY')
+ PINECONE_ENVIRONMENT=os.getenv('PINECONE_ENVIRONMENT')
+ PINECONE_API_KEY=os.getenv('PINECONE_API_KEY')
+
+ # Set the page title
+ st.set_page_config(
+     page_title='Aerospace Chatbot: AMS w/Canopy',
+ )
+ st.title('Aerospace Mechanisms Chatbot')
+ with st.expander('''What's under the hood?'''):
+     st.markdown('''
+     This chatbot looks up information from all Aerospace Mechanism Symposia papers in the following location: https://github.com/dsmueller3760/aerospace_chatbot/tree/main/data/AMS
+     * Source code: https://github.com/dsmueller3760/aerospace_chatbot/blob/main/scripts/setup_page_canopy.py
+     * Uses Pinecone Canopy: https://www.pinecone.io/blog/canopy-rag-framework/
+     * **Response time ~45 seconds per prompt**
+     ''')
+
+ # Add a sidebar for input options
+ st.title('Input')
+ st.sidebar.title('Input Options')
+
+ # Add input fields in the sidebar
+ model_name=st.sidebar.selectbox('Model', ['gpt-3.5-turbo','gpt-3.5-turbo-16k','gpt-3.5-turbo-1106','gpt-4','gpt-4-32k'], index=1)
+ model_list={'gpt-3.5-turbo':4096,
+             'gpt-3.5-turbo-16k':16385,
+             'gpt-3.5-turbo-1106':16385,
+             'gpt-4':8192,
+             'gpt-4-32k':32768}
+ temperature = st.sidebar.slider('Temperature', min_value=0.0, max_value=2.0, value=0.0, step=0.1)
+ n=None  # Not used. How many chat completion choices to generate for each input message.
+ top_p=None  # Not used. Alternative to temperature: the model considers only the tokens comprising the top_p probability mass, so 0.1 means only the top 10% are considered.
+
+ k=st.sidebar.number_input('Number document chunks per query', min_value=1, step=1, value=15)
+ output_level=st.sidebar.selectbox('Level of Output', ['Concise', 'Detailed', 'No Limit'], index=2)
+ max_prompt_tokens=model_list[model_name]
+
+ # Vector databases
+ st.sidebar.title('Vector Database')
+ index_name=st.sidebar.selectbox('Index name', ['canopy--ams'], index=0)
+
+ # Embeddings
+ st.sidebar.title('Embeddings')
+ embedding_type=st.sidebar.selectbox('Embedding type', ['Openai'], index=0)
+ embedding_name=st.sidebar.selectbox('Embedding name', ['text-embedding-ada-002'], index=0)
+
+ # Add a section for secret keys
+ st.sidebar.title('Secret Keys')
+ OPENAI_API_KEY = st.sidebar.text_input('OpenAI API Key', type='password')
+
+ if OPENAI_API_KEY:
+     openai.api_key = OPENAI_API_KEY
+     embeddings_model = OpenAIEmbeddings(model=embedding_name,openai_api_key=OPENAI_API_KEY)
+
+     # Set up chat history
+     qa_model_obj = st.session_state.get('qa_model_obj',[])
+     message_id = st.session_state.get('message_id', 0)
+     history = st.session_state.get('history',[])
+
+     if 'messages' not in st.session_state:
+         st.session_state.messages = []
+     for message in st.session_state.messages:
+         with st.chat_message(message['role']):
+             st.markdown(message['content'])
+
+     # Process some items
+     if output_level == 'Concise':
+         out_token = 50
+     else:
+         out_token = 516
+
+     # Display assistant response in chat message container
+     if prompt := st.chat_input('Prompt here'):
+         st.session_state.messages.append({'role': 'user', 'content': prompt})
+         with st.chat_message('user'):
+             st.markdown(prompt)
+         with st.chat_message('assistant'):
+             message_placeholder = st.empty()
+
+             with st.status('Generating response...') as status:
+                 t_start=time.time()
+                 message_id += 1
+                 st.write('Message: '+str(message_id))
+
+                 # Process some items
+                 if output_level == 'Concise':
+                     max_generated_tokens = 50
+                 elif output_level == 'Detailed':
+                     max_generated_tokens = 516
+                 else:
+                     max_generated_tokens = None
+
+                 # Initialize canopy
+                 Tokenizer.initialize()
+                 pinecone.init(
+                     api_key=PINECONE_API_KEY,
+                     environment=PINECONE_ENVIRONMENT
+                 )
+
+                 kb = KnowledgeBase(index_name=index_name,
+                                    default_top_k=k)
+                 kb.connect()
+                 context_engine = ContextEngine(kb)
+                 llm=OpenAILLM(model_name=model_name)
+                 chat_engine = ChatEngine(context_engine,
+                                          llm=llm,
+                                          max_generated_tokens=max_generated_tokens,
+                                          max_prompt_tokens=max_prompt_tokens)
+
+                 st.write('Searching vector database, generating prompt...')
+                 response, history, chat_response = chat(prompt, history)
+
+                 message_placeholder.markdown(response)
+                 t_delta=time.time() - t_start
+                 status.update(label='Prompt generated in '+"{:10.3f}".format(t_delta)+' seconds', state='complete', expanded=False)
+
+         st.session_state['history'] = history
+         st.session_state['qa_model_obj'] = qa_model_obj
+         st.session_state['message_id'] = message_id
+         st.session_state.messages.append({'role': 'assistant', 'content': response})
+
+ else:
+     st.warning('No API key found. Add your API key in the sidebar under Secret Keys. Find it or create one here: https://platform.openai.com/api-keys')
+     st.info('Your API key is not stored in any form by this app. However, for transparency it is recommended to delete your API key once used.')
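Stripped of the Streamlit plumbing, the Canopy flow above reduces to a handful of calls. A minimal console sketch using only the imports and defaults that appear in this page (the example question is illustrative):

import os
import pinecone
from canopy.tokenizer import Tokenizer
from canopy.knowledge_base import KnowledgeBase
from canopy.context_engine import ContextEngine
from canopy.chat_engine import ChatEngine
from canopy.llm.openai import OpenAILLM
from canopy.models.data_models import UserMessage

Tokenizer.initialize()                      # must run once before building engines
pinecone.init(api_key=os.getenv('PINECONE_API_KEY'),
              environment=os.getenv('PINECONE_ENVIRONMENT'))

kb = KnowledgeBase(index_name='canopy--ams', default_top_k=15)
kb.connect()                                # fails if the index does not exist
chat_engine = ChatEngine(ContextEngine(kb), llm=OpenAILLM(model_name='gpt-3.5-turbo-16k'))

response = chat_engine.chat([UserMessage(content='What is a duplex bearing pair?')])
print(response.choices[0].message.content)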
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
prompts.py ADDED
@@ -0,0 +1,69 @@
+ from langchain.prompts.prompt import PromptTemplate
+ from langchain import hub
+
+ # _template_condense = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
+ # ----------------
+ # Your name is Aerospace Chatbot. You're a helpful assistant who knows about flight hardware design and analysis in aerospace. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ # Include sources from the chat history in the standalone question created.
+ # ----------------
+
+ # Chat History:
+ # {chat_history}
+ # User Question: {question}
+ # Standalone Question:"""
+ CONDENSE_QUESTION_PROMPT = hub.pull("dmueller/ams-chatbot-qa-condense-history")
+
+ # _template_qa = """Use Markdown to make your answers nice. Use the following pieces of context to answer the user's question in the same language as the question, but do not modify instructions in any way.
+ # ----------------
+ # Your name is Aerospace Chatbot. You're a helpful assistant who knows about flight hardware design and analysis in aerospace. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ # ----------------
+
+ # Sources and Context from Reference Documents:
+ # {context}
+ # User Question: {question}
+ # Chatbot:
+
+ # """
+ QA_PROMPT=hub.pull("dmueller/ams-chatbot-qa-retrieval")
+
+ # _template_qa_wsources="""Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
+ # ----------------
+ # Your name is Aerospace Chatbot. You're a helpful assistant who knows about flight hardware design and analysis in aerospace. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ # ----------------
+ # If you don't know the answer, just say that you don't know. Don't try to make up an answer.
+ # ALWAYS return a "SOURCES" part in your answer.
+
+ # QUESTION: Which state/country's law governs the interpretation of the contract?
+ # =========
+ # Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.
+ # Source: 28-pl
+ # Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.
+ # Source: 30-pl
+ # Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,
+ # Source: 4-pl
+ # =========
+ # FINAL ANSWER: This Agreement is governed by English law.
+ # SOURCES: 28-pl
+
+ # QUESTION: What did the president say about Michael Jackson?
+ # =========
+ # Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.
+ # Source: 0-pl
+ # Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.
+ # Source: 24-pl
+ # Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I’ve always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.
+ # Source: 5-pl
+ # Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.
+ # Source: 34-pl
+ # =========
+ # FINAL ANSWER: The president did not mention Michael Jackson.
+ # SOURCES:
+
+ # QUESTION: {question}
+ # =========
+ # {summaries}
+ # =========
+ # FINAL ANSWER:"""
+ QA_WSOURCES_PROMPT=hub.pull("dmueller/ams-chatbot-qa-retrieval-wsources")
+
+ QA_GENERATE_PROMPT=hub.pull("dmueller/generate_qa_prompt")
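hub.pull fetches each prompt from LangChain Hub at import time, so this module needs network access. If that is ever a problem, the commented-out templates above can be compiled locally with the PromptTemplate import already at the top of the file; a sketch for the condense prompt (a local fallback, not what this commit does):

_template_condense = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
----------------
Your name is Aerospace Chatbot. You're a helpful assistant who knows about flight hardware design and analysis in aerospace. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Include sources from the chat history in the standalone question created.
----------------

Chat History:
{chat_history}
User Question: {question}
Standalone Question:"""

# Local stand-in for hub.pull("dmueller/ams-chatbot-qa-condense-history")
CONDENSE_QUESTION_PROMPT = PromptTemplate(
    input_variables=['chat_history', 'question'],
    template=_template_condense,
)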
pyproject.toml ADDED
@@ -0,0 +1,23 @@
+ [tool.poetry]
+ name = "chatbot-ams-langchain"
+ version = "0.1.0"
+ description = ""
+ authors = ["Dan Mueller <dsm@danmueller.pro>"]
+ readme = "README.md"
+
+ [tool.poetry.dependencies]
+ python = "^3.11"
+ langchain = "^0.0.348"
+ streamlit = "^1.29.0"
+ python-dotenv = "^1.0.0"
+ tqdm = "^4.66.1"
+ ipykernel = "^6.27.1"
+ langchainhub = "^0.1.14"
+ canopy-sdk = "0.1.3"
+ canopy = "^8.42"
+ openai = "0.27.5"
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
queries.py ADDED
@@ -0,0 +1,145 @@
+ """
+ @author: dsmueller3760
+ Query from pinecone embeddings
+ """
+ from dotenv import load_dotenv, find_dotenv
+ from langchain.vectorstores import Pinecone
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.llms import OpenAI
+
+ from langchain.chains.qa_with_sources import load_qa_with_sources_chain
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains.llm import LLMChain
+
+ import os
+ import pinecone
+
+ from prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT, QA_WSOURCES_PROMPT
+
+
+ class QA_Model:
+     def __init__(self,
+                  index_name,
+                  embeddings_model,
+                  llm,
+                  k=6,
+                  search_type='similarity',
+                  temperature=0,
+                  verbose=False,
+                  chain_type='stuff',
+                  filter_arg=False):
+
+         self.index_name:str=index_name
+         self.embeddings_model:OpenAIEmbeddings=embeddings_model
+         self.llm=llm
+         self.k:int=k
+         self.search_type:str=search_type
+         self.temperature:int=temperature
+         self.verbose:bool=verbose
+         self.chain_type:str=chain_type
+         self.filter_arg:bool=filter_arg
+         self.sources=[]  # Initialized here because the filter logic below reads it
+
+         load_dotenv(find_dotenv(),override=True)
+
+         # Read in from the vector database
+         self.vectorstore = Pinecone.from_existing_index(index_name,embeddings_model)
+
+         # Set up question generator and qa with sources
+         self.question_generator = LLMChain(llm=llm,
+                                            prompt=CONDENSE_QUESTION_PROMPT,
+                                            verbose=verbose)
+         self.doc_chain = load_qa_with_sources_chain(llm, chain_type=chain_type, prompt=QA_WSOURCES_PROMPT, verbose=verbose)
+
+         # Establish chat history
+         self.chat_history=ConversationBufferMemory(memory_key='chat_history',
+                                                    input_key='question',
+                                                    output_key='answer',
+                                                    return_messages=True)
+
+         # Implement filter
+         if filter_arg:
+             filter_list = list(set(item["source"] for item in self.sources[-1]))
+             filter_items=[]
+             for item in filter_list:
+                 filter_item={"source": item}
+                 filter_items.append(filter_item)
+             filter={"$or":filter_items}
+         else:
+             filter=None
+
+         if search_type=='mmr':
+             search_kwargs={'k':k,'fetch_k':50,'filter':filter}  # See as_retriever docs for parameters
+         else:
+             search_kwargs={'k':k,'filter':filter}  # See as_retriever docs for parameters
+
+         self.qa = ConversationalRetrievalChain(
+             retriever=self.vectorstore.as_retriever(search_type=search_type,
+                                                     search_kwargs=search_kwargs),
+             combine_docs_chain=self.doc_chain,
+             question_generator=self.question_generator,
+             memory=self.chat_history,
+             verbose=verbose,
+             return_source_documents=True,
+             return_generated_question=True,
+         )
+
+     def query_docs(self,query,tags=None):
+         self.result=self.qa({'question': query},tags=tags)
+
+         # Collect the metadata of the source documents behind this answer
+         temp_sources=[]
+         for data in self.result['source_documents']:
+             temp_sources.append(data.metadata)
+
+         self.sources.append(temp_sources)
+
+     def update_model(self,llm,
+                      k=6,
+                      search_type='similarity',
+                      fetch_k=50,
+                      verbose=None,
+                      filter_arg=False):
+
+         self.llm=llm
+
+         # Set up question generator and qa with sources
+         self.question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=verbose)
+         self.doc_chain = load_qa_with_sources_chain(self.llm, chain_type=self.chain_type, prompt=QA_WSOURCES_PROMPT, verbose=verbose)
+
+         # Implement filter: restrict retrieval to the sources behind the most recent answer
+         if filter_arg:
+             filter_list = list(set(item["source"] for item in self.sources[-1]))
+             filter_items=[]
+             for item in filter_list:
+                 filter_item={"source": item}
+                 filter_items.append(filter_item)
+             filter={"$or":filter_items}
+         else:
+             filter=None
+
+         if search_type=='mmr':
+             search_kwargs={'k':k,'fetch_k':fetch_k,'filter':filter}  # See as_retriever docs for parameters
+         else:
+             search_kwargs={'k':k,'filter':filter}  # See as_retriever docs for parameters
+
+         self.qa = ConversationalRetrievalChain(
+             retriever=self.vectorstore.as_retriever(search_type=search_type,
+                                                     search_kwargs=search_kwargs),
+             combine_docs_chain=self.doc_chain,
+             question_generator=self.question_generator,
+             memory=self.chat_history,
+             verbose=verbose,
+             return_source_documents=True,
+             return_generated_question=True,
+         )
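A minimal usage sketch of QA_Model outside Streamlit, assuming the same canopy--ams index and environment variables the pages use (the question text is illustrative):

import os
import pinecone
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from queries import QA_Model

pinecone.init(api_key=os.getenv('PINECONE_API_KEY'),
              environment=os.getenv('PINECONE_ENVIRONMENT'))

embeddings = OpenAIEmbeddings(model='text-embedding-ada-002')
llm = OpenAI(temperature=0, max_tokens=516)

qa = QA_Model('canopy--ams', embeddings, llm, k=4, search_type='similarity')
qa.query_docs('What lubricants are used for space mechanisms?')
print(qa.result['answer'])   # answer text
print(qa.sources[-1])        # metadata of the documents behind it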
requirements.txt ADDED
@@ -0,0 +1,138 @@
+ aiobotocore==2.8.0
+ aiohttp==3.9.1
+ aioitertools==0.11.0
+ aiosignal==1.3.1
+ altair==5.2.0
+ anyio==3.7.1
+ appnope==0.1.3
+ asttokens==2.4.1
+ attrs==23.1.0
+ blinker==1.7.0
+ botocore==1.33.1
+ cachetools==5.3.2
+ canopy==8.42
+ canopy-sdk==0.1.3
+ certifi==2023.11.17
+ charset-normalizer==3.3.2
+ click==8.1.7
+ comm==0.2.0
+ dataclasses-json==0.6.3
+ debugpy==1.8.0
+ decorator==5.1.1
+ dnspython==2.4.2
+ executing==2.0.1
+ fastapi==0.92.0
+ frozenlist==1.4.0
+ fsspec==2023.12.1
+ gcsfs==2023.12.1
+ gitdb==4.0.11
+ GitPython==3.1.40
+ google-api-core==2.15.0
+ google-auth==2.25.2
+ google-auth-oauthlib==1.1.0
+ google-cloud-core==2.4.1
+ google-cloud-storage==2.13.0
+ google-crc32c==1.5.0
+ google-resumable-media==2.6.0
+ googleapis-common-protos==1.62.0
+ gunicorn==21.2.0
+ h11==0.14.0
+ idna==3.6
+ importlib-metadata==6.11.0
+ ipykernel==6.27.1
+ ipython==8.18.1
+ jedi==0.19.1
+ Jinja2==3.1.2
+ jmespath==1.0.1
+ joblib==1.3.2
+ jsonpatch==1.33
+ jsonpointer==2.4
+ jsonschema==4.20.0
+ jsonschema-specifications==2023.11.2
+ jupyter_client==8.6.0
+ jupyter_core==5.5.0
+ langchain==0.0.348
+ langchain-core==0.0.12
+ langchainhub==0.1.14
+ langsmith==0.0.69
+ loguru==0.7.2
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.3
+ marshmallow==3.20.1
+ matplotlib-inline==0.1.6
+ mdurl==0.1.2
+ mmh3==3.1.0
+ multidict==6.0.4
+ munch==4.0.0
+ mypy-extensions==1.0.0
+ nest-asyncio==1.5.8
+ nltk==3.8.1
+ numpy==1.25.2
+ oauthlib==3.2.2
+ openai==0.27.5
+ packaging==23.2
+ pandas==2.1.4
+ pandas-stubs==2.0.3.230814
+ parso==0.8.3
+ pexpect==4.9.0
+ Pillow==10.1.0
+ pinecone-client==2.2.4
+ pinecone-datasets==0.6.2
+ pinecone-text==0.6.1
+ platformdirs==4.1.0
+ prompt-toolkit==3.0.41
+ protobuf==4.25.1
+ psutil==5.9.6
+ ptyprocess==0.7.0
+ pure-eval==0.2.2
+ pyarrow==11.0.0
+ pyasn1==0.5.1
+ pyasn1-modules==0.3.0
+ pydantic==1.10.13
+ pydeck==0.8.0
+ Pygments==2.17.2
+ python-dateutil==2.8.2
+ python-dotenv==1.0.0
+ pytz==2023.3.post1
+ PyYAML==6.0.1
+ pyzmq==25.1.2
+ referencing==0.32.0
+ regex==2023.10.3
+ requests==2.31.0
+ requests-oauthlib==1.3.1
+ rich==13.7.0
+ rpds-py==0.13.2
+ rsa==4.9
+ s3fs==2023.12.1
+ six==1.16.0
+ smmap==5.0.1
+ sniffio==1.3.0
+ SQLAlchemy==2.0.23
+ sse-starlette==1.8.2
+ stack-data==0.6.3
+ starlette==0.25.0
+ streamlit==1.29.0
+ tenacity==8.2.3
+ tiktoken==0.3.3
+ toml==0.10.2
+ toolz==0.12.0
+ tornado==6.4
+ tqdm==4.66.1
+ traitlets==5.14.0
+ types-jsonschema==4.20.0.0
+ types-pytz==2023.3.1.1
+ types-PyYAML==6.0.12.12
+ types-requests==2.31.0.10
+ types-tqdm==4.66.0.5
+ typing-inspect==0.9.0
+ typing_extensions==4.8.0
+ tzdata==2023.3
+ tzlocal==5.2
+ urllib3==2.0.7
+ uvicorn==0.20.0
+ validators==0.22.0
+ wcwidth==0.2.12
+ wget==3.2
+ wrapt==1.16.0
+ yarl==1.9.4
+ zipp==3.17.0