jpfearnworks committed
Commit 1b9f020
0 Parent(s)

Initial Commit
.envtemplate ADDED
@@ -0,0 +1,2 @@
+ OPENAI_API_KEY=
+ HUGGINGFACE_API_TOKEN=
.gitignore ADDED
@@ -0,0 +1,90 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ env/
+ build/
+ dist/
+ eggs/
+ *.egg-info/
+ *.egg
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .coverage
+ .tox/
+ nosetests.xml
+ coverage.xml
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
Dockerfile ADDED
@@ -0,0 +1,17 @@
+ FROM python:3.9
+
+ WORKDIR /app
+ RUN pip install streamlit
+ COPY requirements.txt requirements.txt
+ RUN pip install -r requirements.txt
+
+
+ COPY . .
+
+ WORKDIR /app/src
+
+ ENV PATH="/root/.local/bin:${PATH}"
+
+ EXPOSE 8501
+
+ CMD ["streamlit", "run", "main.py"]
docker-compose.yml ADDED
@@ -0,0 +1,10 @@
+ version: '3.8'
+
+ services:
+   app:
+     build: .
+     ports:
+       - "8501:8501"
+     volumes:
+       - ./src:/app/src
+     command: streamlit run main.py
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ langchain
+ streamlit
+ python-dotenv
+ openai
+ wikipedia
src/__init__.py ADDED
File without changes
src/main.py ADDED
@@ -0,0 +1,31 @@
+ from dotenv import load_dotenv, find_dotenv
+ import os
+ import streamlit as st
+ from reasoning import ReasoningRouter, default_reasoning_router_config
+ load_dotenv(find_dotenv())
+
+ openai_api_key = os.getenv("OPENAI_API_KEY")
+
+ def run_app():
+     """
+     Runs the Streamlit application.
+
+     Returns:
+         None
+     """
+     openai_api_key = os.getenv("OPENAI_API_KEY")
+
+     col1, col2 = st.columns([1, 3])
+     with col1:
+         st.text("AI Agents Sandbox")
+     with col2:
+         st.title("Prompt Strategy Demo")
+     question = st.text_area('Enter your question here:', height=200)
+     config = default_reasoning_router_config()
+     if question:
+         determiner = ReasoningRouter(api_key=openai_api_key, config=config, question=question, display=st.write)
+         determiner.determine_and_execute()
+
+ if __name__ == "__main__":
+     run_app()
+
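main.py loads OPENAI_API_KEY through python-dotenv, so the app expects the key from .envtemplate to be copied into a real .env file (or exported in the environment) before the UI starts. A minimal pre-flight sketch under that assumption; the helper file name check_env.py is illustrative and not part of this commit:

    # check_env.py (illustrative helper, not in this commit)
    # Verifies the OpenAI key is visible the same way main.py reads it.
    import os
    from dotenv import load_dotenv, find_dotenv

    load_dotenv(find_dotenv())  # same call main.py makes at import time
    if not os.getenv("OPENAI_API_KEY"):
        raise SystemExit("OPENAI_API_KEY is empty; copy .envtemplate to .env and fill it in.")
    print("OPENAI_API_KEY found; launch the UI with: streamlit run main.py (from src/)")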
src/reasoning/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from .react import ReactStrategy
+ from .tree_of_thought import TreeOfThoughtStrategy
+ from .chain_of_thought import ChainOfThoughtStrategy
+ from .reasoning_router import ReasoningRouter, default_reasoning_router_config
+ from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
src/reasoning/chain_of_thought.py ADDED
@@ -0,0 +1,31 @@
+ from langchain import PromptTemplate, LLMChain
+ import streamlit as st
+ from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
+ from typing import Callable
+
+ class ChainOfThoughtStrategy(ReasoningStrategy):
+     def __init__(self, config: ReasoningConfig, display: Callable):
+         super().__init__(config=config, display=display)
+
+     def run(self, question):
+         print('Using Chain of Thought')
+         self.display("Using 'Chain of Thought'")
+
+         template_cot = """You are asked a question and, rather than simply guessing the right answer, break down the solution into a series of steps.
+         The question is {question}
+
+         Write out your step-by-step reasoning and, after considering all of the facts and applying this reasoning, write out your final answer.
+         """
+         prompt = PromptTemplate(template=template_cot, input_variables=["question"])
+         llm_chain = LLMChain(prompt=prompt, llm=self.llm)
+         response_cot = llm_chain.run(question)
+         print(response_cot)
+         self.display(response_cot)
+
+ def default_cot_config():
+     usage = """
+     This problem is simple and the solution may be obtained by focusing on generating a coherent series
+     of reasoning steps that lead to the final answer. The approach provides interpretability, decomposes
+     multi-step problems into intermediate steps, and allows for additional computation allocation.
+     """
+     return ReasoningConfig(usage=usage)
src/reasoning/react.py ADDED
@@ -0,0 +1,50 @@
+ from langchain.agents import initialize_agent, Tool, AgentExecutor
+ from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
+ from langchain.docstore.wikipedia import Wikipedia
+ from langchain.agents import initialize_agent, Tool, AgentExecutor
+ from langchain.agents.react.base import DocstoreExplorer
+ from typing import Callable
+
+
+ class ReactStrategy(ReasoningStrategy):
+     def __init__(self, config: ReasoningConfig, display: Callable):
+         super().__init__(config=config, display=display)
+
+     def run(self, question) -> str:
+         print('Using ReAct')
+         self.display("Using 'ReAct' - (Reasoning and Action)")
+
+         docstore = DocstoreExplorer(Wikipedia())
+         tools = [
+             Tool(
+                 name="Search",
+                 func=docstore.search,
+                 description="Search for a term in the docstore.",
+             ),
+             Tool(
+                 name="Lookup",
+                 func=docstore.lookup,
+                 description="Lookup a term in the docstore.",
+             )
+         ]
+         react = initialize_agent(tools, self.llm, agent="react-docstore", verbose=True)
+         agent_executor = AgentExecutor.from_agent_and_tools(
+             agent=react.agent,
+             tools=tools,
+             verbose=True,
+         )
+         response_react = agent_executor.run(question)
+         print(response_react)
+         self.display(response_react)
+         return response_react
+
+ def default_react_config() -> ReasoningConfig:
+     usage = """
+     The solution for this problem requires searching for further information online,
+     generating reasoning traces and task-specific actions in an interleaved manner.
+     Starting with incomplete information this technique will prompt for the need to get
+     additional helpful information at each step. It allows for dynamic reasoning to create,
+     maintain, and adjust high-level plans for acting, while also interacting with external
+     sources to incorporate additional information into reasoning
+     """
+     return ReasoningConfig(usage=usage)
src/reasoning/reasoning_router.py ADDED
@@ -0,0 +1,82 @@
+ from langchain import PromptTemplate, LLMChain
+ from .react import ReactStrategy, default_react_config
+ from .tree_of_thought import TreeOfThoughtStrategy, default_tot_config
+ from .chain_of_thought import ChainOfThoughtStrategy, default_cot_config
+ from .reasoning_strategy import ReasoningConfig
+ from typing import Tuple, Callable
+ import re
+ import os
+
+ class ReasoningRouter:
+     def __init__(self, api_key: str, config: ReasoningConfig, question: str, display: Callable):
+         """
+         Initializes a ReasoningRouter instance.
+
+         Args:
+             api_key (str): The OpenAI API key.
+             config (ReasoningConfig): Router configuration (LLM class, temperature, max_tokens, usage).
+             question (str): The user's question.
+             display (Callable): Function used to display output (main.py passes st.write).
+         """
+         self.api_key = api_key
+         self.llm = config.llm_class(temperature=config.temperature, max_tokens=config.max_tokens)
+         self.question: str = question
+         self.display: Callable = display
+
+         self.strategies = {
+             1: ReactStrategy(default_react_config(), display=self.display),
+             2: TreeOfThoughtStrategy(default_tot_config(), display=self.display),
+             3: ChainOfThoughtStrategy(default_cot_config(), display=self.display)
+         }
+         self.usage_block = f"""
+
+         1. {self.strategies[1].usage} [1].
+         2. {self.strategies[2].usage} [2].
+         3. {self.strategies[3].usage} [3].
+
+         """
+         self.template = """
+         Consider the following problem or puzzle: {question}. Based on the characteristics of the problem,
+         identify the most suitable approach among the three techniques described below. Consider each carefully
+         in the context of the question, write out the likelihood of success of each, and then select the most
+         appropriate approach:""" + self.usage_block + """
+         Based on the characteristics of the given problem or puzzle, select the technique that aligns most closely with the nature of the problem. It is important to first provide the number of the technique that best solves the problem, followed by a period. Then you may provide your reason why you have chosen this technique.
+
+         The number of the selected technique is...
+         """
+
+     @staticmethod
+     def find_first_integer(text):
+         match = re.search(r'\d+', text)
+         if match:
+             return int(match.group())
+         else:
+             return None
+
+     def determine_and_execute(self) -> Tuple[str, str]:
+         """
+         Determines the appropriate reasoning strategy based on the user's question and executes it.
+
+         Returns:
+             Tuple[str, str]: The routing response and the selected strategy's response.
+         """
+
+         prompt = PromptTemplate(template=self.template, input_variables=["question"])
+         llm_chain = LLMChain(prompt=prompt, llm=self.llm)
+
+         response = llm_chain.run(self.question)
+         print(response)
+         self.display(response)
+         n = self.find_first_integer(response)
+
+         if n in self.strategies:
+             strat_resp = self.strategies[n].run(self.question)
+         else:
+             strat_resp = f"Strategy number {n} is not recognized."
+         print(strat_resp)
+
+         return response, strat_resp
+
+ def default_reasoning_router_config() -> ReasoningConfig:
+     usage = "This router should be used when determining the most effective strategy for a query that requires more complex, but general, reasoning to derive an answer."
+     return ReasoningConfig(temperature=0.6, max_tokens=3000, usage=usage)
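Because ReasoningRouter only needs an API key, a ReasoningConfig, a question, and a display callable, it can also be exercised outside Streamlit. A minimal usage sketch, assuming OPENAI_API_KEY is available via .env and the script is run from src/; the example question and the use of print as the display callable are illustrative, not part of the commit:

    import os
    from dotenv import load_dotenv, find_dotenv
    from reasoning import ReasoningRouter, default_reasoning_router_config

    load_dotenv(find_dotenv())  # pick up OPENAI_API_KEY from .env, as main.py does

    router = ReasoningRouter(
        api_key=os.getenv("OPENAI_API_KEY"),
        config=default_reasoning_router_config(),
        question="Is it possible to walk from Alaska to Russia? Explain your reasoning.",  # illustrative
        display=print,  # any callable works; the Streamlit app passes st.write
    )
    selection, answer = router.determine_and_execute()  # routing response and the chosen strategy's answer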
src/reasoning/reasoning_strategy.py ADDED
@@ -0,0 +1,37 @@
+ from langchain.llms import OpenAI
+ from pydantic import BaseModel
+ from langchain.llms.base import BaseLLM
+ from typing import Type, Callable
+ import streamlit as st
+ import os
+
+
+
+ class ReasoningConfig(BaseModel):
+     """
+     Configuration for a reasoning strategy.
+     Shared by ReactStrategy, TreeOfThoughtStrategy, and ChainOfThoughtStrategy.
+
+     Attributes:
+         temperature (float): Sampling temperature passed to the LLM.
+         max_tokens (int): Maximum number of tokens the LLM may generate.
+         llm_class (Type[BaseLLM]): The langchain LLM class used for reasoning.
+         usage (str): A description of when the strategy is appropriate;
+             the router includes this text in its selection prompt.
+     """
+     temperature: float = 0.7
+     max_tokens: int = 1500
+     llm_class: Type[BaseLLM] = OpenAI
+     usage: str
+
+ class ReasoningStrategy:
+     def __init__(self, config: ReasoningConfig, display: Callable):
+         self.llm = config.llm_class(temperature=config.temperature, max_tokens=config.max_tokens)  # instantiate the configured LLM
+         self.display = display
+         self.usage = config.usage
+     def run(self, question):
+         raise NotImplementedError()
+
+ def default_reasoning_config():
+     usage = "This is the default reasoning config that should only be used as a last resort."
+     return ReasoningConfig(usage=usage)
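Each strategy in this commit follows the same pattern: subclass ReasoningStrategy, build a prompt inside run(), and expose a default_*_config() helper whose usage text the router injects into its selection prompt. A sketch of what an additional strategy could look like under that pattern; the class name, prompt, and helper below are hypothetical and not part of the commit, and such a strategy would also need to be registered in ReasoningRouter.strategies to be selectable:

    from typing import Callable
    from langchain import PromptTemplate, LLMChain
    from reasoning.reasoning_strategy import ReasoningStrategy, ReasoningConfig

    class StepBackStrategy(ReasoningStrategy):  # hypothetical, for illustration only
        def __init__(self, config: ReasoningConfig, display: Callable):
            super().__init__(config=config, display=display)

        def run(self, question) -> str:
            self.display("Using 'Step-Back' (illustrative)")
            template = """First restate the question in more general terms, answer the general
            version, then use that answer to address the specific question. The question is {question}"""
            prompt = PromptTemplate(template=template, input_variables=["question"])
            response = LLMChain(prompt=prompt, llm=self.llm).run(question)
            self.display(response)
            return response

    def default_step_back_config() -> ReasoningConfig:
        usage = "Use when the question benefits from abstracting to a more general principle first."
        return ReasoningConfig(usage=usage)

The sketch uses an absolute import so it runs from src/; a file added inside src/reasoning/ would use a relative import like the other strategy modules.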
src/reasoning/tree_of_thought.py ADDED
@@ -0,0 +1,44 @@
+ from .reasoning_strategy import ReasoningStrategy
+ from langchain import LLMChain, PromptTemplate
+ from .reasoning_strategy import ReasoningStrategy, ReasoningConfig
+ from typing import Callable
+
+ class TreeOfThoughtStrategy(ReasoningStrategy):
+     def __init__(self, config: ReasoningConfig, display: Callable):
+         super().__init__(config=config, display=display)
+
+     def run(self, question) -> str:
+         print('Using ToT')
+         self.display("Using 'Tree of Thoughts'")
+
+         template_tot = """Imagine three different experts are answering this question.
+         They will brainstorm the answer step by step, reasoning carefully and taking all facts into consideration.
+         All experts will write down 1 step of their thinking,
+         then share it with the group.
+         They will each critique their response, and all the responses of the others.
+         They will check their answer based on science and the laws of physics.
+         Then all experts will go on to the next step and write down this step of their thinking.
+         They will keep going through steps until they reach their conclusion, taking into account the thoughts of the other experts.
+         If at any time they realise that there is a flaw in their logic they will backtrack to where that flaw occurred.
+         If any expert realises they're wrong at any point then they acknowledge this and start another train of thought.
+         Each expert will assign a likelihood of their current assertion being correct.
+         Continue until the experts agree on the single most likely answer and write out that answer along with any commentary to support that answer.
+         The question is {question}
+
+         The experts' reasoning, along with their final answer, is...
+         """
+         prompt = PromptTemplate(template=template_tot, input_variables=["question"])
+         llm_chain = LLMChain(prompt=prompt, llm=self.llm)
+         response_tot = llm_chain.run(question)
+         print(response_tot)
+         self.display(response_tot)
+         return response_tot
+
+ def default_tot_config():
+     usage = """
+     This problem is complex and the solution requires exploring multiple reasoning paths over thoughts.
+     It treats the problem as a search over a tree structure, with each node representing a partial
+     solution and the branches corresponding to operators that modify the solution. It involves thought
+     decomposition, thought generation, state evaluation, and a search algorithm.
+     """
+     return ReasoningConfig(usage=usage)