from modules.base.llm_chain_config import LLMChainConfig
from modules.knowledge_retrieval.destination_chain import DestinationChainStrategy
from modules.knowledge_retrieval.base import KnowledgeDomain
from loguru import logger
from langchain import PromptTemplate, LLMChain
from langchain.llms.openai import OpenAI
from typing import Callable
import pprint


class GamingDomain(KnowledgeDomain):
    """
    GamingDomain Class

    Design:
        This class extends the KnowledgeDomain class and provides a specific implementation for
        generating responses to gaming-related questions. It adheres to the Single Responsibility
        Principle (SRP): its sole responsibility is generating gaming-related responses.

    Intended Implementation:
        The generate_response method should generate appropriate responses to gaming-related
        questions. Depending on the requirements, this could involve a rule-based approach, a
        trained machine learning model, or some other method of generating responses.
    """

    def generate_response(self, question: str) -> str:
        template_cot = """You are asked a gaming-related question. Rather than simply guessing the right answer, break down the solution into a series of steps.
        The question is {question}

        Write out your step-by-step reasoning and, after considering all of the facts and applying this reasoning, write out your final answer.
        """
        prompt = PromptTemplate(template=template_cot, input_variables=["question"])
        # Assumes OpenAI is the LLM to be used.
        llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0.7, max_tokens=1500))
        response_cot = llm_chain.run(question)
        return response_cot
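
# A minimal sketch of using GamingDomain directly (assumes an OpenAI API key is set in the
# environment and the langchain/OpenAI versions used by this repo):
#
#   domain = GamingDomain()
#   answer = domain.generate_response("What is a good beginner strategy in chess?")
#   logger.info(answer)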

class GamingChain(DestinationChainStrategy):
    """
    GamingChain Class

    Design:
        This class is a specific implementation of the ChainStrategy class.
        It follows the Open/Closed Principle (OCP) because it extends the ChainStrategy class
        without modifying its behavior. It also adheres to the Dependency Inversion Principle (DIP)
        because it depends on the KnowledgeDomain abstraction (realised here by GamingDomain)
        rather than on a concrete implementation.

    Intended Implementation:
        The GamingChain class serves as a wrapper around a GamingDomain instance. It implements the
        run method from the ChainStrategy class, which simply calls the generate_response method of
        the GamingDomain. As such, when the run method is called with a question as input, the
        GamingChain class will return a response generated by the GamingDomain.
    """

    def __init__(self, config: LLMChainConfig, display: Callable):
        super().__init__(config=config, display=display, knowledge_domain=GamingDomain(), usage=config.usage)
        print("Creating Gaming Chain with config: ")
        pprint.pprint(vars(config))

    def run(self, question):
        print('Using Gaming Chain of Thought')
        self.display("Using 'Gaming Chain of Thought'")
        response_cot = super().run(question)
        return response_cot

def get_gaming_chain_config(temperature: float = 0.7) -> LLMChainConfig:
    usage = """
    This problem is gaming-related, concerning the active play and leisure activities we engage in. Example topics are:
    - What is the best way to play a game?
    - What games do you like to play?
    - What games have we played together?
    - Gaming scenario generation
    - Gaming configurations
    And related items
    """
    return LLMChainConfig(usage=usage, temperature=temperature)
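
# Illustrative usage sketch (not part of the original module): how GamingChain might be
# wired together end-to-end. This assumes an OpenAI API key is configured in the
# environment and that LLMChainConfig's remaining fields have sensible defaults; the
# plain `print` function stands in for a real display callable.
if __name__ == "__main__":
    gaming_config = get_gaming_chain_config(temperature=0.7)
    gaming_chain = GamingChain(config=gaming_config, display=print)
    answer = gaming_chain.run("What games have we played together?")
    print(answer)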