# OpenWormLLM / ring.py
import os
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

from openworm_ai.utils.llms import (
    GENERAL_QUERY_PROMPT_TEMPLATE,
    GENERAL_QUERY_LIMITED_PROMPT_TEMPLATE,
    get_llm as get_llm_via_openworm_ai,
    requires_openai_key,
    get_openai_api_key,
    get_gemini_api_key,
    generate_response,
    LLM_GPT35,
    LLM_GPT4,
    LLM_GPT4o,
    LLM_GEMINI_2F,
    LLM_OLLAMA_LLAMA32_1B,
    LLM_OLLAMA_MISTRAL,
    LLM_OLLAMA_OLMO2_7B,
    LLM_COHERE,
)
OPENAI_LLMS = [LLM_GPT35, LLM_GPT4, LLM_GPT4o]
OLLAMA_LLMS = [LLM_OLLAMA_LLAMA32_1B, LLM_OLLAMA_MISTRAL, LLM_OLLAMA_OLMO2_7B]
OPENWORM_AI_LLMS = [LLM_GEMINI_2F] + OPENAI_LLMS + [LLM_COHERE]  # + OLLAMA_LLMS
LLM_LLAMA2 = "LLAMA2"
LLM_GEMINI = "gemini-1.5-pro"
LLM_CLAUDE2 = "Claude2.1"
PREF_ORDER_LLMS = [LLM_GEMINI] + OPENWORM_AI_LLMS # + [LLM_LLAMA2, LLM_CLAUDE2]
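
# Usage sketch for PREF_ORDER_LLMS (not used in this module): a caller could
# pick the first preferred model for which credentials are available.
# `has_key_for` is a hypothetical helper, not defined here:
#     panel_chair = next(v for v in PREF_ORDER_LLMS if has_key_for(v))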
def get_llamaapi_key():
    """Read the Llama API key from the LLAMAAPI_KEY environment variable."""
    return os.environ.get("LLAMAAPI_KEY")


def get_anthropic_key():
    """Read the Anthropic API key from the ANTHROPIC_API_KEY environment variable."""
    return os.environ.get("ANTHROPIC_API_KEY")
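
# The two helpers above read plain environment variables; a typical shell
# setup (values elided) might look like:
#     export LLAMAAPI_KEY=...        # used for LLM_LLAMA2
#     export ANTHROPIC_API_KEY=...   # used for LLM_CLAUDE2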
def get_llm(llm_ver, temperature):
    """Return a LangChain LLM/chat model for the given model identifier."""
    if llm_ver in OPENWORM_AI_LLMS:
        llm = get_llm_via_openworm_ai(llm_ver, temperature)
    elif llm_ver == LLM_LLAMA2:
        import asyncio

        from llamaapi import LlamaAPI
        from langchain_experimental.llms import ChatLlamaAPI

        # ChatLlamaAPI needs a running event loop; create and install one
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        llama = LlamaAPI(get_llamaapi_key())
        llm = ChatLlamaAPI(client=llama)
    elif llm_ver == LLM_GEMINI:
        from langchain_google_genai import ChatGoogleGenerativeAI

        llm = ChatGoogleGenerativeAI(
            model=LLM_GEMINI,
            google_api_key=get_gemini_api_key(),
            temperature=temperature,
        )
    elif llm_ver == LLM_CLAUDE2:
        from langchain_anthropic import AnthropicLLM

        llm = AnthropicLLM(
            model="claude-2.1",
            anthropic_api_key=get_anthropic_key(),
            temperature=temperature,
        )
    else:
        raise ValueError("Unknown LLM version: %s" % llm_ver)
    return llm
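
# Example usage (a sketch; assumes the relevant API key is set in the
# environment, and the question is just a placeholder):
#     llm = get_llm(LLM_GPT4o, temperature=0)
#     chain = (
#         PromptTemplate(
#             template=GENERAL_QUERY_PROMPT_TEMPLATE, input_variables=["question"]
#         )
#         | llm
#         | StrOutputParser()
#     )
#     print(chain.invoke("What is C. elegans?"))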
def generate_panel_response(input_text, llm_panelists, llm_panel_chair, temperature):
    """Ask each panelist LLM the question independently, then have the panel
    chair LLM summarise and reconcile their answers."""
    responses = {}
    prompt = PromptTemplate(
        template=GENERAL_QUERY_PROMPT_TEMPLATE, input_variables=["question"]
    )
    for llm_ver in llm_panelists:
        # Each panelist answers the same question with the same general prompt
        llm = get_llm(llm_ver, temperature)
        llm_chain = prompt | llm | StrOutputParser()
        responses[llm_ver] = llm_chain.invoke(input_text)
    panel_chair_prompt = """You are a neuroscientist chairing a panel discussion on the nematode C. elegans. A researcher has asked the following question:

{question}

and %i experts on the panel have given their answers.
""" % (len(llm_panelists))
    for llm_ver in llm_panelists:
        # Escape any braces in the panelist's answer so that PromptTemplate
        # does not treat them as template variables
        answer = responses[llm_ver].replace("{", "{{").replace("}", "}}")
        panel_chair_prompt += """
The panelist named Dr. %s has provided the answer: %s
""" % (llm_ver, answer)
    panel_chair_prompt += """
Please generate a brief answer to the researcher's question based on the panelists' responses, pointing out any inconsistencies in their answers, and using your own knowledge of C. elegans to try to resolve them."""

    # Log the full prompt sent to the panel chair
    print(panel_chair_prompt)
prompt = PromptTemplate(template=panel_chair_prompt, input_variables=["question"])
llm = get_llm(llm_panel_chair, temperature)
llm_chain = prompt | llm | StrOutputParser()
response_chair = llm_chain.invoke(input_text)
response = """**%s**: %s""" % (llm_panel_chair, response_chair)
response += """
-----------------------------------
_Individual responses:_
"""
for llm_ver in responses:
response += """
_**%s**:_ _%s_
""" % (
llm_ver,
responses[llm_ver].strip().replace("\n", " "),
)
return response
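

if __name__ == "__main__":
    # Minimal demo, a sketch assuming the OpenAI and Gemini API keys are set
    # in the environment; the question and panel composition are placeholders.
    question = "How many neurons does C. elegans have?"
    panelists = [LLM_GPT4o, LLM_GEMINI_2F]
    panel_chair = LLM_GPT4o
    print(generate_panel_response(question, panelists, panel_chair, temperature=0))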