import asyncio

from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import ChatPromptTemplate
from langchain.schema import BaseOutputParser

from utility import read_pdf, terminal_print


class Replacement(BaseOutputParser):
    """Parse the output of an LLM call to a comma-separated list."""

    def parse(self, text: str, **kwargs) -> list:
        """Split the LLM output on ", " after stripping surrounding whitespace.

        Any extra keyword arguments are only echoed to stdout for debugging.
        """
        if kwargs:
            print(kwargs)
        return text.strip().split(", ")


@terminal_print  # need to review this.
async def async_generate(article, name, chain, replacement_term=None):
    """Invoke *chain* asynchronously and store the response text in *article*.

    Args:
        article: mutable mapping that receives the result under key *name*.
        name: key under which the response content is stored.
        chain: a runnable exposing ``ainvoke`` that accepts a ``{"term": ...}``
            payload and returns an object with a ``.content`` attribute.
        replacement_term: optional value for the ``term`` prompt variable;
            falsy values fall back to the empty string, as before.
    """
    resp = await chain.ainvoke({"term": replacement_term if replacement_term else ""})
    article[name] = resp.content


@terminal_print  # need to review this.
async def execute_concurrent(article, prompts):
    """Build one LLM chain per prompt and run them concurrently.

    Prompts whose required inputs (``input_list``) are not yet present in
    *article* are skipped with a console message.  The remaining chains are
    gathered with :func:`asyncio.gather`; each writes its result back into
    *article* via :func:`async_generate`.

    Args:
        article: mapping holding the article sections; must contain a
            ``"logic"`` key selecting which prompt variant to use.
        prompts: mapping of result-name -> prompt spec.  Each spec provides
            ``input_list``, ``assessment_step``, the prompt text keyed by
            ``article["logic"]``, and optionally ``reformat_inst`` and
            ``term``.
    """
    llm = ChatOpenAI(temperature=0.0, model_name="gpt-3.5-turbo-16k")
    tasks = []
    prompt_type = article["logic"]
    # Work queue of prompt names; a queue (rather than a plain for-loop) was
    # chosen so skipped prompts could later be re-appended for a retry pass.
    prompt_list = list(prompts.keys())
    print(prompt_list)
    while prompt_list:
        name = prompt_list.pop(0)
        p = prompts[name]
        if any(s not in article for s in p["input_list"]):
            print("skip", name, "due to missing input", p["input_list"])
            continue
        print("executing", p["assessment_step"], name)
        input_text = "".join(article[s] for s in p["input_list"])
        chat_prompt = ChatPromptTemplate.from_messages([
            ("human", input_text),
            ("system", p[prompt_type]),
        ])
        if "reformat_inst" in p:
            chat_prompt.append(("system", p["reformat_inst"]))
        # NOTE(review): the mapping is currently always empty, so the
        # post-processing step is a no-op — presumably it is meant to be
        # populated later; confirm before relying on it.  The default
        # argument binds the per-iteration dict, avoiding the late-binding
        # closure pitfall inside the loop.
        post_prompt_maping = {}
        post_replace_term = lambda res, map=post_prompt_maping: replace_term(res, map=map)
        chain = chat_prompt | llm | post_replace_term
        if "term" in p:
            # in here the name shall be the term_prompt from the terms triggered
            tasks.append(async_generate(article, name, chain,
                                        replacement_term=p["term"]["term_prompt"]))
        else:
            tasks.append(async_generate(article, name, chain))
    await asyncio.gather(*tasks)


def replace_term(res, **kwargs):
    """Apply the ``map`` keyword's key->term substitutions to ``res.content``.

    Mutates *res* in place and returns it so the function can sit at the end
    of a runnable pipeline.  Without a ``map`` keyword, *res* passes through
    unchanged.
    """
    for key, term in kwargs.get("map", {}).items():
        res.content = res.content.replace(key, term)
    return res


if __name__ == "__main__":
    # lets try the Blood Loss, Operation Time, and Need for ICU in other folder
    sample_article = ".samples/Ha SK, 2008.pdf"
    sample_content, _ = read_pdf(sample_article)
    llm = ChatOpenAI(temperature=0.0, model_name="gpt-3.5-turbo-16k")

    with open(".prompts/other/Operation Time.txt") as f:
        prompt = f.read()
    name = "Operation Time"

    post_prompt_maping = {}
    post_replace_term = lambda res, map=post_prompt_maping: replace_term(res, map=map)
    chain_prompt = ChatPromptTemplate.from_messages([
        ("human", sample_article),
        ("system", prompt),
    ])
    # experiment with cascading the chain
    chain = chain_prompt | llm
    chain2 = chain | post_replace_term
    # BUG FIX: the original `chain2.last.with_retry = True` overwrote the
    # Runnable.with_retry *method* with a boolean and enabled nothing.
    # with_retry() returns a new runnable wrapped with retry behaviour.
    chain2 = chain2.with_retry()
    res = chain2.invoke({"term": name})
    print(res.content)