Adrian Cowham committed on
Commit
28333f8
β€’
1 Parent(s): 0c47d68

adding friends

Browse files
Files changed (5) hide show
  1. .gitattributes +2 -0
  2. README.md +4 -4
  3. requirements.txt +14 -0
  4. src/app.py +10 -17
  5. src/core/embedding.py +12 -23
.gitattributes CHANGED
@@ -36,3 +36,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
36
  resources/design-by-fire.pdf filter=lfs diff=lfs merge=lfs -text
37
  resources/lets-talk.pdf filter=lfs diff=lfs merge=lfs -text
38
  resources/progit.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
36
  resources/design-by-fire.pdf filter=lfs diff=lfs merge=lfs -text
37
  resources/lets-talk.pdf filter=lfs diff=lfs merge=lfs -text
38
  resources/progit.pdf filter=lfs diff=lfs merge=lfs -text
39
+ resources filter=lfs diff=lfs merge=lfs -text
40
+ resources/How_To_Win_Friends_And_Influence_People_-_Dale_Carnegie.pdf filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: Pyrocene
3
- emoji: πŸŒ–
4
- colorFrom: green
5
- colorTo: green
6
  sdk: gradio
7
  sdk_version: 3.40.1
8
  app_file: src/app.py
 
1
  ---
2
+ title: friends
3
+ emoji: πŸ‘―β€β™€οΈ
4
+ colorFrom: yellow
5
+ colorTo: yellow
6
  sdk: gradio
7
  sdk_version: 3.40.1
8
  app_file: src/app.py
requirements.txt CHANGED
@@ -39,6 +39,7 @@ importlib-resources==6.0.1
39
  isodate==0.6.1
40
  itsdangerous==2.1.2
41
  Jinja2==3.1.2
 
42
  jsonschema==4.19.0
43
  jsonschema-specifications==2023.7.1
44
  kiwisolver==1.4.4
@@ -53,11 +54,13 @@ marshmallow==3.20.1
53
  matplotlib==3.7.2
54
  mdit-py-plugins==0.3.3
55
  mdurl==0.1.2
 
56
  multidict==6.0.4
57
  mypy-extensions==1.0.0
58
  networkx==3.1
59
  nibabel==5.1.0
60
  nipype==1.8.6
 
61
  numexpr==2.8.5
62
  numpy==1.25.2
63
  openai==0.27.8
@@ -78,23 +81,34 @@ python-multipart==0.0.6
78
  pytz==2023.3
79
  pyxnat==1.6
80
  PyYAML==6.0.1
 
81
  rdflib==7.0.0
82
  referencing==0.30.2
83
  regex==2023.8.8
84
  requests==2.31.0
85
  rpds-py==0.9.2
 
 
86
  scipy==1.11.2
87
  semantic-version==2.10.0
 
 
88
  simplejson==3.19.1
89
  six==1.16.0
90
  sniffio==1.3.0
91
  SQLAlchemy==2.0.20
92
  starlette==0.27.0
 
93
  tenacity==8.2.3
 
94
  tiktoken==0.4.0
 
95
  toolz==0.12.0
 
 
96
  tqdm==4.66.1
97
  traits==6.3.2
 
98
  typing-inspect==0.9.0
99
  typing_extensions==4.7.1
100
  tzdata==2023.3
 
39
  isodate==0.6.1
40
  itsdangerous==2.1.2
41
  Jinja2==3.1.2
42
+ joblib==1.3.2
43
  jsonschema==4.19.0
44
  jsonschema-specifications==2023.7.1
45
  kiwisolver==1.4.4
 
54
  matplotlib==3.7.2
55
  mdit-py-plugins==0.3.3
56
  mdurl==0.1.2
57
+ mpmath==1.3.0
58
  multidict==6.0.4
59
  mypy-extensions==1.0.0
60
  networkx==3.1
61
  nibabel==5.1.0
62
  nipype==1.8.6
63
+ nltk==3.8.1
64
  numexpr==2.8.5
65
  numpy==1.25.2
66
  openai==0.27.8
 
81
  pytz==2023.3
82
  pyxnat==1.6
83
  PyYAML==6.0.1
84
+ rank-bm25==0.2.2
85
  rdflib==7.0.0
86
  referencing==0.30.2
87
  regex==2023.8.8
88
  requests==2.31.0
89
  rpds-py==0.9.2
90
+ safetensors==0.3.2
91
+ scikit-learn==1.3.0
92
  scipy==1.11.2
93
  semantic-version==2.10.0
94
+ sentence-transformers==2.2.2
95
+ sentencepiece==0.1.99
96
  simplejson==3.19.1
97
  six==1.16.0
98
  sniffio==1.3.0
99
  SQLAlchemy==2.0.20
100
  starlette==0.27.0
101
+ sympy==1.12
102
  tenacity==8.2.3
103
+ threadpoolctl==3.2.0
104
  tiktoken==0.4.0
105
+ tokenizers==0.13.3
106
  toolz==0.12.0
107
+ torch==2.0.1
108
+ torchvision==0.15.2
109
  tqdm==4.66.1
110
  traits==6.3.2
111
+ transformers==4.31.0
112
  typing-inspect==0.9.0
113
  typing_extensions==4.7.1
114
  tzdata==2023.3
src/app.py CHANGED
@@ -23,8 +23,8 @@ K = 5
23
  USE_VERBOSE = True
24
  API_KEY = os.environ["OPENAI_API_KEY"]
25
  system_template = """
26
- The context below contains excerpts from 'Design by Fire,' by Emily Elizabeth Schlickman and Brett Milligan. You must only use the information in the context below to formulate your response. If there is not enough information to formulate a response, you must respond with
27
- "I'm sorry, but I can't find the answer to your question in, the book Design by Fire."
28
 
29
  Here is the context:
30
  {context}
@@ -43,7 +43,7 @@ class AnswerConversationBufferMemory(ConversationBufferMemory):
43
  return super(AnswerConversationBufferMemory, self).save_context(inputs,{'response': outputs['answer']})
44
 
45
  def getretriever():
46
- with open("./resources/design-by-fire.pdf", 'rb') as uploaded_file:
47
  try:
48
  file = read_file(uploaded_file)
49
  except Exception as e:
@@ -95,26 +95,19 @@ def load_chain(inputs = None):
95
  combine_docs_chain_kwargs={"prompt": qa_prompt})
96
  return chain
97
 
98
- CSS ="""
99
- .contain { display: flex; flex-direction: column; }
100
- .gradio-container { height: 100vh !important; }
101
- #component-0 { height: 100%; }
102
- #chatbot { flex-grow: 1; overflow: auto;}
103
- """
104
-
105
  with gr.Blocks() as block:
106
  with gr.Row():
107
  with gr.Column(scale=0.75):
108
  with gr.Row():
109
- gr.Markdown("<h1>Design by Fire</h1>")
110
  with gr.Row():
111
- gr.Markdown("by Emily Elizabeth Schlickman and Brett Milligan")
112
  chatbot = gr.Chatbot(elem_id="chatbot").style(height=600)
113
 
114
  with gr.Row():
115
  message = gr.Textbox(
116
  label="",
117
- placeholder="Design by Fire",
118
  lines=1,
119
  )
120
  with gr.Row():
@@ -129,11 +122,11 @@ with gr.Blocks() as block:
129
  with gr.Column(scale=0.25):
130
  with gr.Row():
131
  gr.Markdown("<h1><center>Suggestions</center></h1>")
132
- ex1 = gr.Button(value="What are the main factors and trends discussed in the book that contribute to the changing behavior of wildfires?", variant="primary")
133
  ex1.click(getanswer, inputs=[chain_state, ex1, state], outputs=[chatbot, state, message])
134
- ex2 = gr.Button(value="How does the book explore the relationship between fire and different landscapes, such as wilderness and urban areas?", variant="primary")
135
  ex2.click(getanswer, inputs=[chain_state, ex2, state], outputs=[chatbot, state, message])
136
- ex3 = gr.Button(value="What are the three approaches to designing with fire that the book presents?", variant="primary")
137
- ex3.click(getanswer, inputs=[chain_state, ex3, state], outputs=[chatbot, state, message])
138
 
139
  block.launch(debug=True)
 
23
  USE_VERBOSE = True
24
  API_KEY = os.environ["OPENAI_API_KEY"]
25
  system_template = """
26
+ The context below contains excerpts from 'How to Win Friends & Influence People,' by Dale Carnegie. You must only use the information in the context below to formulate your response. If there is not enough information to formulate a response, you must respond with
27
+ "I'm sorry, but I can't find the answer to your question in the book How to Win Friends & Influence People."
28
 
29
  Here is the context:
30
  {context}
 
43
  return super(AnswerConversationBufferMemory, self).save_context(inputs,{'response': outputs['answer']})
44
 
45
  def getretriever():
46
+ with open("./resources/How_To_Win_Friends_And_Influence_People_-_Dale_Carnegie.pdf", 'rb') as uploaded_file:
47
  try:
48
  file = read_file(uploaded_file)
49
  except Exception as e:
 
95
  combine_docs_chain_kwargs={"prompt": qa_prompt})
96
  return chain
97
 
 
 
 
 
 
 
 
98
  with gr.Blocks() as block:
99
  with gr.Row():
100
  with gr.Column(scale=0.75):
101
  with gr.Row():
102
+ gr.Markdown("<h1>How to Win Friends & Influence People</h1>")
103
  with gr.Row():
104
+ gr.Markdown("by Dale Carnegie")
105
  chatbot = gr.Chatbot(elem_id="chatbot").style(height=600)
106
 
107
  with gr.Row():
108
  message = gr.Textbox(
109
  label="",
110
+ placeholder="How to Win Friends...",
111
  lines=1,
112
  )
113
  with gr.Row():
 
122
  with gr.Column(scale=0.25):
123
  with gr.Row():
124
  gr.Markdown("<h1><center>Suggestions</center></h1>")
125
+ ex1 = gr.Button(value="How do I know if I'm talking about myself too much?", variant="primary")
126
  ex1.click(getanswer, inputs=[chain_state, ex1, state], outputs=[chatbot, state, message])
127
+ ex2 = gr.Button(value="What do people enjoy talking about the most?", variant="primary")
128
  ex2.click(getanswer, inputs=[chain_state, ex2, state], outputs=[chatbot, state, message])
129
+ ex4 = gr.Button(value="Why should I try to get along with people better?", variant="primary")
130
+ ex4.click(getanswer, inputs=[chain_state, ex4, state], outputs=[chatbot, state, message])
131
 
132
  block.launch(debug=True)
src/core/embedding.py CHANGED
@@ -1,7 +1,7 @@
1
  from typing import List, Type
2
 
3
  from langchain.docstore.document import Document
4
- from langchain.embeddings import OpenAIEmbeddings
5
  from langchain.embeddings.base import Embeddings
6
  from langchain.vectorstores import VectorStore
7
  from langchain.vectorstores.faiss import FAISS
@@ -50,27 +50,16 @@ class FolderIndex:
50
  def embed_files(
51
  files: List[File], embedding: str, vector_store: str, **kwargs
52
  ) -> FolderIndex:
53
- """Embeds a collection of files and stores them in a FolderIndex."""
54
-
55
- supported_embeddings: dict[str, Type[Embeddings]] = {
56
- "openai": OpenAIEmbeddings,
57
- "debug": FakeEmbeddings,
58
- }
59
- supported_vector_stores: dict[str, Type[VectorStore]] = {
60
- "faiss": FAISS,
61
- "debug": FakeVectorStore,
62
- }
63
-
64
- if embedding in supported_embeddings:
65
- _embeddings = supported_embeddings[embedding](**kwargs)
66
- else:
67
- raise NotImplementedError(f"Embedding {embedding} not supported.")
68
-
69
- if vector_store in supported_vector_stores:
70
- _vector_store = supported_vector_stores[vector_store]
71
- else:
72
- raise NotImplementedError(f"Vector store {vector_store} not supported.")
73
-
74
  return FolderIndex.from_files(
75
- files=files, embeddings=_embeddings, vector_store=_vector_store
76
  )
 
1
  from typing import List, Type
2
 
3
  from langchain.docstore.document import Document
4
+ from langchain.embeddings import HuggingFaceBgeEmbeddings
5
  from langchain.embeddings.base import Embeddings
6
  from langchain.vectorstores import VectorStore
7
  from langchain.vectorstores.faiss import FAISS
 
50
  def embed_files(
51
  files: List[File], embedding: str, vector_store: str, **kwargs
52
  ) -> FolderIndex:
53
+ model_name = "BAAI/bge-small-en"
54
+ model_kwargs = {'device': 'cpu'}
55
+ encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity
56
+ model_norm = HuggingFaceBgeEmbeddings(
57
+ model_name=model_name,
58
+ model_kwargs=model_kwargs,
59
+ encode_kwargs=encode_kwargs
60
+ )
61
+ # embeddings = OpenAIEmbeddings
62
+ embeddings = model_norm
 
 
 
 
 
 
 
 
 
 
 
63
  return FolderIndex.from_files(
64
+ files=files, embeddings=embeddings, vector_store=FAISS
65
  )