fsal committed on
Commit
64ddbf9
1 Parent(s): 8045124

change dockerfile permissions

Browse files
Dockerfile CHANGED
@@ -8,12 +8,12 @@ ENV PYTHONDONTWRITEBYTECODE=1 \
8
  PATH="/home/appuser/.local/bin:$PATH"
9
 
10
  RUN pip install --user --no-cache-dir --upgrade pip
11
- COPY ./requirements.txt /home/appuser/requirements.txt
12
  RUN pip install --user --no-cache-dir --upgrade -r /home/appuser/requirements.txt
13
 
14
- COPY ./langchain-streamlit-demo/ /home/appuser/langchain-streamlit-demo/
15
 
16
  WORKDIR /home/appuser/langchain-streamlit-demo
17
  EXPOSE 7860
18
 
19
- CMD ["streamlit", "run", "/home/appuser/langchain-streamlit-demo/app.py", "--server.port", "7860", "--server.address", "0.0.0.0"]
 
8
  PATH="/home/appuser/.local/bin:$PATH"
9
 
10
  RUN pip install --user --no-cache-dir --upgrade pip
11
+ COPY --chown=appuser ./requirements.txt /home/appuser/requirements.txt
12
  RUN pip install --user --no-cache-dir --upgrade -r /home/appuser/requirements.txt
13
 
14
+ COPY --chown=appuser ./langchain-streamlit-demo/ /home/appuser/langchain-streamlit-demo/
15
 
16
  WORKDIR /home/appuser/langchain-streamlit-demo
17
  EXPOSE 7860
18
 
19
+ CMD ["streamlit", "run", "/home/appuser/langchain-streamlit-demo/app.py", "--server.port", "7860", "--server.address", "0.0.0.0", "--server.enableXsrfProtection", "false"]
langchain-streamlit-demo/.streamlit/config.toml CHANGED
@@ -4,3 +4,7 @@ backgroundColor="#FFFFFF"
4
  secondaryBackgroundColor="#F0F2F6"
5
  textColor="#262730"
6
  font="sans serif"
 
 
 
 
 
4
  secondaryBackgroundColor="#F0F2F6"
5
  textColor="#262730"
6
  font="sans serif"
7
+
8
+ [server]
9
+ enableCORS = false
10
+ enableXsrfProtection = false
langchain-streamlit-demo/app.py CHANGED
@@ -27,8 +27,9 @@ from llm_resources import (
27
  get_runnable,
28
  get_texts_and_multiretriever,
29
  )
30
- from python_coder import get_agent as get_python_agent
31
- from research_assistant.chain import get_chain as get_research_assistant_chain
 
32
  from streamlit_feedback import streamlit_feedback
33
 
34
  __version__ = "2.1.4"
@@ -248,12 +249,16 @@ with sidebar:
248
  "refine",
249
  "map_reduce",
250
  "map_rerank",
251
- "Q&A Generation",
252
- "Summarization",
253
  ],
254
  index=0,
255
  help=chain_type_help,
256
  )
 
 
 
 
257
  # use_azure = st.toggle(
258
  # label="Use Azure OpenAI",
259
  # value=st.session_state.AZURE_EMB_AVAILABLE,
@@ -417,6 +422,8 @@ if st.session_state.llm:
417
 
418
  # --- Chat Input ---
419
  prompt = st.chat_input(placeholder="Ask me a question!")
 
 
420
  if prompt:
421
  st.chat_message("user").write(prompt)
422
  feedback_update = None
@@ -458,38 +465,39 @@ if st.session_state.llm:
458
  default_tools += load_tools(["requests_get"])
459
  default_tools += load_tools(["llm-math"], llm=st.session_state.llm)
460
  if st.session_state.provider in ("Azure OpenAI", "OpenAI"):
461
- research_assistant_chain = get_research_assistant_chain(
462
- search_llm=get_llm(**get_llm_args_temp_zero), # type: ignore
463
- writer_llm=get_llm(**get_llm_args_temp_zero), # type: ignore
464
- )
465
  st_callback = StreamlitCallbackHandler(st.container())
466
  callbacks.append(st_callback)
467
 
468
- @tool("web-research-assistant")
469
- def research_assistant_tool(question: str, callbacks: Callbacks = None):
470
- """This assistant returns a comprehensive report based on web research.
471
- It's slow and relatively expensive, so use it sparingly.
472
- Consider using a different tool for quick facts or web queries.
473
- """
474
- return research_assistant_chain.invoke(
475
- dict(question=question),
476
- config=get_config(callbacks),
477
- )
478
-
479
- python_coder_agent = get_python_agent(st.session_state.llm)
480
-
481
- @tool("python-coder-assistant")
482
- def python_coder_tool(input_str: str, callbacks: Callbacks = None):
483
- """This assistant writes PYTHON code.
484
- Give it clear instructions and requirements.
485
- Do not use it for tasks other than Python.
486
- """
487
- return python_coder_agent.invoke(
488
- dict(input=input_str),
489
- config=get_config(callbacks),
490
- )
491
-
492
- TOOLS = [research_assistant_tool, python_coder_tool] + default_tools
 
493
 
494
  if use_document_chat:
495
  st.session_state.doc_chain = get_runnable(
 
27
  get_runnable,
28
  get_texts_and_multiretriever,
29
  )
30
+
31
+ # from python_coder import get_agent as get_python_agent
32
+ # from research_assistant.chain import get_chain as get_research_assistant_chain
33
  from streamlit_feedback import streamlit_feedback
34
 
35
  __version__ = "2.1.4"
 
249
  "refine",
250
  "map_reduce",
251
  "map_rerank",
252
+ # "Q&A Generation",
253
+ # "Summarization",
254
  ],
255
  index=0,
256
  help=chain_type_help,
257
  )
258
+ question = st.selectbox(
259
+ "Choose a test question", ["--"] + default_values.TEST_QUESTIONS
260
+ )
261
+
262
  # use_azure = st.toggle(
263
  # label="Use Azure OpenAI",
264
  # value=st.session_state.AZURE_EMB_AVAILABLE,
 
422
 
423
  # --- Chat Input ---
424
  prompt = st.chat_input(placeholder="Ask me a question!")
425
+ if question and question != "--":
426
+ prompt = question
427
  if prompt:
428
  st.chat_message("user").write(prompt)
429
  feedback_update = None
 
465
  default_tools += load_tools(["requests_get"])
466
  default_tools += load_tools(["llm-math"], llm=st.session_state.llm)
467
  if st.session_state.provider in ("Azure OpenAI", "OpenAI"):
468
+ # research_assistant_chain = get_research_assistant_chain(
469
+ # search_llm=get_llm(**get_llm_args_temp_zero), # type: ignore
470
+ # writer_llm=get_llm(**get_llm_args_temp_zero), # type: ignore
471
+ # )
472
  st_callback = StreamlitCallbackHandler(st.container())
473
  callbacks.append(st_callback)
474
 
475
+ # @tool("web-research-assistant")
476
+ # def research_assistant_tool(question: str, callbacks: Callbacks = None):
477
+ # """This assistant returns a comprehensive report based on web research.
478
+ # It's slow and relatively expensive, so use it sparingly.
479
+ # Consider using a different tool for quick facts or web queries.
480
+ # """
481
+ # return research_assistant_chain.invoke(
482
+ # dict(question=question),
483
+ # config=get_config(callbacks),
484
+ # )
485
+
486
+ # python_coder_agent = get_python_agent(st.session_state.llm)
487
+
488
+ # @tool("python-coder-assistant")
489
+ # def python_coder_tool(input_str: str, callbacks: Callbacks = None):
490
+ # """This assistant writes PYTHON code.
491
+ # Give it clear instructions and requirements.
492
+ # Do not use it for tasks other than Python.
493
+ # """
494
+ # return python_coder_agent.invoke(
495
+ # dict(input=input_str),
496
+ # config=get_config(callbacks),
497
+ # )
498
+
499
+ TOOLS = default_tools
500
+ # [research_assistant_tool, python_coder_tool] + default_tools
501
 
502
  if use_document_chat:
503
  st.session_state.doc_chain = get_runnable(
langchain-streamlit-demo/defaults.py CHANGED
@@ -25,7 +25,13 @@ DEFAULT_MODEL = os.environ.get("DEFAULT_MODEL", "gpt-4-turbo-preview")
25
 
26
  DEFAULT_SYSTEM_PROMPT = os.environ.get(
27
  "DEFAULT_SYSTEM_PROMPT",
28
- "You are a helpful chatbot. Do not rush. Always plan, think, and act in a step-by-step manner.",
 
 
 
 
 
 
29
  )
30
 
31
  MIN_TEMP = float(os.environ.get("MIN_TEMPERATURE", 0.0))
@@ -38,6 +44,21 @@ DEFAULT_MAX_TOKENS = int(os.environ.get("DEFAULT_MAX_TOKENS", 1000))
38
 
39
  DEFAULT_LANGSMITH_PROJECT = os.environ.get("LANGCHAIN_PROJECT")
40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  # AZURE_VARS = [
42
  # "AZURE_OPENAI_BASE_URL",
43
  # "AZURE_OPENAI_API_VERSION",
@@ -102,6 +123,7 @@ DEFAULT_VALUES = namedtuple(
102
  "DEFAULT_RETRIEVER_K",
103
  "SHOW_LANGSMITH_OPTIONS",
104
  "SHOW_AZURE_OPTIONS",
 
105
  ],
106
  )
107
 
@@ -131,4 +153,5 @@ default_values = DEFAULT_VALUES(
131
  DEFAULT_RETRIEVER_K,
132
  SHOW_LANGSMITH_OPTIONS,
133
  SHOW_AZURE_OPTIONS,
 
134
  )
 
25
 
26
  DEFAULT_SYSTEM_PROMPT = os.environ.get(
27
  "DEFAULT_SYSTEM_PROMPT",
28
+ # "You are a helpful chatbot. Do not rush. Always plan, think, and act in a step-by-step manner.",
29
+ """
30
+ Comportati come un operatore di call center.
31
+ Hai accesso a tutte le informazioni necessarie per rispondere alle domande dei clienti tramite il vectorstore.
32
+ Se non sai rispondere ad una domanda, rispondi che non hai la risposta e offri di connettere il cliente con un operatore umano.
33
+ Rispondi in italiano, usando uno stile amichevole ma formale.
34
+ """,
35
  )
36
 
37
  MIN_TEMP = float(os.environ.get("MIN_TEMPERATURE", 0.0))
 
44
 
45
  DEFAULT_LANGSMITH_PROJECT = os.environ.get("LANGCHAIN_PROJECT")
46
 
47
+ TEST_QUESTIONS = [
48
+ "non ho ricevuto le credenziali di accesso all'area riservata: dove posso trovarle?",
49
+ "vorrei informazioni relativamente alle prestazioni presenti nel checkup",
50
+ "la risonanza magnetica è coperta dalla polizza?",
51
+ "le visite odontoiatriche sono coperte dalla polizza?",
52
+ "vorrei informazioni su come richiedere il checkup",
53
+ "vorrei informazioni su come trovare struttura convenzionata che faccia le risonanze magnetiche",
54
+ "come chiede autorizzazione per fare una risonanza magnetica?",
55
+ "come chiedere rimborso per una risonanza magnetica?",
56
+ "quali prestazioni sono incluse nel pacchetto maternità?",
57
+ "come chiedere autorizzazione per prestazione del pacchetto maternità?",
58
+ "come devo procedere per fare l'estensione della copertura al nucleo familiare?",
59
+ "Altro",
60
+ ]
61
+
62
  # AZURE_VARS = [
63
  # "AZURE_OPENAI_BASE_URL",
64
  # "AZURE_OPENAI_API_VERSION",
 
123
  "DEFAULT_RETRIEVER_K",
124
  "SHOW_LANGSMITH_OPTIONS",
125
  "SHOW_AZURE_OPTIONS",
126
+ "TEST_QUESTIONS",
127
  ],
128
  )
129
 
 
153
  DEFAULT_RETRIEVER_K,
154
  SHOW_LANGSMITH_OPTIONS,
155
  SHOW_AZURE_OPTIONS,
156
+ TEST_QUESTIONS,
157
  )
langchain-streamlit-demo/llm_resources.py CHANGED
@@ -29,8 +29,9 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
29
  from langchain.tools.base import BaseTool
30
  from langchain.vectorstores import FAISS
31
  from langchain_core.messages import SystemMessage
32
- from qagen import get_rag_qa_gen_chain
33
- from summarize import get_rag_summarization_chain
 
34
 
35
 
36
  def get_agent(
@@ -138,17 +139,17 @@ def get_runnable(
138
  memory=memory,
139
  ) | (lambda output: output["text"])
140
 
141
- if document_chat_chain_type == "Q&A Generation":
142
- return get_rag_qa_gen_chain(
143
- retriever,
144
- llm,
145
- )
146
- elif document_chat_chain_type == "Summarization":
147
- return get_rag_summarization_chain(
148
- summarization_prompt,
149
- retriever,
150
- llm,
151
- )
152
  else:
153
  return RetrievalQA.from_chain_type(
154
  llm=llm,
 
29
  from langchain.tools.base import BaseTool
30
  from langchain.vectorstores import FAISS
31
  from langchain_core.messages import SystemMessage
32
+
33
+ # from qagen import get_rag_qa_gen_chain
34
+ # from summarize import get_rag_summarization_chain
35
 
36
 
37
  def get_agent(
 
139
  memory=memory,
140
  ) | (lambda output: output["text"])
141
 
142
+ # if document_chat_chain_type == "Q&A Generation":
143
+ # return get_rag_qa_gen_chain(
144
+ # retriever,
145
+ # llm,
146
+ # )
147
+ # elif document_chat_chain_type == "Summarization":
148
+ # return get_rag_summarization_chain(
149
+ # summarization_prompt,
150
+ # retriever,
151
+ # llm,
152
+ # )
153
  else:
154
  return RetrievalQA.from_chain_type(
155
  llm=llm,