# ai_workflows/app/workflows/til/analyse_til_v2.py
# Author: theRealNG
# Last commit: "workflows(all): fix version spelling" (5dc54c6)
from langchain import callbacks, hub
from langchain_core.messages import SystemMessage
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, UUID4
from typing import List, Optional
import os
import pprint
class AnalyseTilV2:
    """Analyse a user's TIL (Today I Learned) entries with an LLM chain.

    Pulls the "til_analysis" prompt from the LangChain hub, runs it through
    ChatOpenAI with a JSON output parser, then converts each per-TIL
    assessment into an ok / not_ok verdict returned to the caller.
    """

    def kickoff(self, inputs=None):
        """Run the full analysis workflow.

        Args:
            inputs: dict expected to contain a "content" key with the raw
                TIL text. Defaults to None instead of a shared mutable {}
                (the original mutable default is a classic Python pitfall);
                behavior is unchanged — a missing "content" key still
                raises KeyError.

        Returns:
            dict with "til" (list of per-TIL verdicts) and "run_id".
        """
        inputs = {} if inputs is None else inputs
        print("Human Message:")
        pprint.pp(inputs)
        self.content = inputs["content"]
        self._gather_feedback()
        return self._final_call_on_feedback()

    def _final_call_on_feedback(self):
        """Turn raw LLM assessments (self.feedback_results) into verdicts.

        Returns:
            dict: {"til": [verdict, ...], "run_id": self.run_id} where each
            verdict has "takeaway", "feedback" ("ok"/"not_ok") and, when
            not_ok, the model's "reason".
        """
        final_results = []
        for feedback in self.feedback_results:
            print("Final analysis of:")
            pprint.pp(feedback)
            result = {
                "takeaway": feedback.get('til', ""),
                "feedback": "not_ok",
            }
            # A TIL passes only when BOTH metrics are rated High; otherwise
            # surface the model's reason back to the user. (Merges the two
            # originally-duplicated branches that set the same reason.)
            if (feedback["factuality_categorization"] != 'High'
                    or feedback["insightful_categorization"] != 'High'):
                result["reason"] = feedback["assessment_reason"]
            else:
                result["feedback"] = "ok"
            final_results.append(result)  # append, not list re-concatenation
        response = {"til": final_results, "run_id": self.run_id}
        print("Final Results:")
        pprint.pp(response)
        return response

    def _gather_feedback(self):
        """Invoke the analysis chain; store feedback_results and run_id."""
        feedback_chain = self._build_feedback_chain()
        feedback_parser = JsonOutputParser(pydantic_object=TilV2FeedbackResults)
        pprint.pp("Analysing the TIL.....")
        with callbacks.collect_runs() as cb:
            self.feedback_results = feedback_chain.invoke(
                {"til_content": self.content,
                 "format_instructions": feedback_parser.get_format_instructions()})['tils']
            # The first traced run is the chain invocation; keep its id so
            # the caller can reference the trace.
            self.run_id = cb.traced_runs[0].id
        print("Run ID: ", self.run_id)
        print("Feedback: ")
        pprint.pp(self.feedback_results)

    def _build_feedback_chain(self):
        """Build the prompt | llm | parser chain with tracing metadata.

        Requires OPENAI_MODEL and ENV environment variables to be set.
        """
        feedback_parser = JsonOutputParser(pydantic_object=TilV2FeedbackResults)
        feedback_prompt = hub.pull("til_analysis")
        print("Prompt: ")
        pprint.pp(feedback_prompt, width=80)
        llm = ChatOpenAI(model=os.environ["OPENAI_MODEL"], temperature=0.2)
        analysis_chain = (feedback_prompt | llm | feedback_parser).with_config({
            "tags": ["til"], "run_name": "Analysing TIL v2",
            "metadata": {
                "version": "v2.0.0",
                "growth_activity": "til",
                "env": os.environ["ENV"],
                "model": os.environ["OPENAI_MODEL"]
            }
        })
        return analysis_chain
class TilV2FeedbackResult(BaseModel):
    """One TIL assessment returned by the LLM.

    The Field descriptions are runtime-significant: they are serialized
    into the JSON format instructions sent to the model.
    """

    # Verbatim echo of the user's TIL text.
    til: str = Field(
        description="TIL as exactly captured by the user without any modifications.")
    # High/Medium/Low rating on the "insightful" metric.
    insightful_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the insightful metric.")
    # High/Medium/Low rating on the "factuality" metric.
    factuality_categorization: str = Field(
        description="TIL categorization as High/Medium/Low based on correctness on the factuality metric.")
    # Model's explanation; per the prompt, only populated when a metric is not High.
    assessment_reason: str = Field(
        description="Explain your assessment in one or two sentences about the factuality and insightful metrics directly to the user, but only if they are not rated as High. Use the second-person point of view.")
class TilV2FeedbackResults(BaseModel):
    """Top-level LLM response: one assessment per TIL in the content."""

    # Parsed from the 'tils' key of the chain output.
    tils: List[TilV2FeedbackResult]
class TilV2FinalFeedback(BaseModel):
    """Final verdict for a single TIL, as emitted by _final_call_on_feedback."""

    # The user's TIL text (possibly "" when absent from the assessment).
    takeaway: str
    # "ok" when both metrics are High, otherwise "not_ok".
    feedback: str
    # Model's explanation; present only for "not_ok" verdicts.
    reason: Optional[str] = None
class TilV2FeedbackResponse(BaseModel):
    """Shape of the dict returned by AnalyseTilV2.kickoff."""

    # LangSmith traced-run id captured via callbacks.collect_runs.
    run_id: UUID4
    # One final verdict per analysed TIL.
    til: List[TilV2FinalFeedback]