# (removed non-code export residue: "Spaces:" / "Running" status lines from the original paste)
import os
import re

import comet_llm
from dotenv import load_dotenv
from llama_index.llms.groq import Groq
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage
from openai import AzureOpenAI, OpenAI

import prompt_tracing

# Pull API keys and endpoints (Azure OpenAI, Groq, Mistral) from a local .env file.
load_dotenv()
def check_claim_validity(s: str) -> bool:
    """Return True if *s* contains a 'claim validity: true' marker (case-insensitive).

    The `[,.!\\s]*` run tolerates stray punctuation/whitespace the LLM may emit
    between the colon and the word 'true'.
    """
    pattern = r'claim validity:\s*[,.!\s]*true'
    return re.search(pattern, s, re.IGNORECASE | re.DOTALL) is not None
def check_response_validity(s: str) -> bool:
    """Return True if *s* contains a 'response validity: true' marker (case-insensitive).

    The `[,.!\\s]*` run tolerates stray punctuation/whitespace the LLM may emit
    between the colon and the word 'true'.
    """
    pattern = r'response validity:\s*[,.!\s]*true'
    return re.search(pattern, s, re.IGNORECASE | re.DOTALL) is not None
def check_response_given(s: str) -> bool:
    """Return True if *s* contains a 'response given: true' marker (case-insensitive).

    The `[,.!\\s]*` run tolerates stray punctuation/whitespace the LLM may emit
    between the colon and the word 'true'.
    """
    pattern = r'response given:\s*[,.!\s]*true'
    return re.search(pattern, s, re.IGNORECASE | re.DOTALL) is not None
def check_prima_facie_rebutted(s: str) -> bool:
    """Return True if *s* marks the claim as prima facie refuted (case-insensitive).

    Accepts either 'true' or 'yes' after the 'Prima Facie refuted:' label; the
    `[,.!\\s]*` run tolerates stray punctuation/whitespace the LLM may emit.
    """
    pattern = r'Prima Facie refuted:\s*[,.!\s]*(?:true|yes)'
    return re.search(pattern, s, re.IGNORECASE | re.DOTALL) is not None
def go_to_stage_4_1(s: str) -> bool:
    """Return True if *s* states the defendant provided new evidence (case-insensitive).

    The `[,.!\\s]*` run tolerates stray punctuation/whitespace the LLM may emit
    between the colon and the word 'true'.
    """
    pattern = r'Defendant provided new evidence:\s*[,.!\s]*true'
    return re.search(pattern, s, re.IGNORECASE | re.DOTALL) is not None
def llm_function_call(prompt: str, model: str) -> str:
    """Send *prompt* to the LLM backend selected by *model* and return its text reply.

    Routing:
      - any model name containing "gpt"  -> Azure OpenAI chat completion
      - "llama3-70b-8192"                -> Groq-hosted Llama 3 via llama_index
      - "mistral-large-azure"            -> Mistral Large on an Azure AI endpoint
    Any other value returns the sentinel string "no model available".

    All credentials and endpoints are read from environment variables
    (loaded from .env at import time).
    """
    if "gpt" in model:
        # Azure OpenAI client configured entirely from the environment.
        client = AzureOpenAI(
            api_key=os.getenv("AZURE_OPENAI_API_KEY"),
            api_version="2024-02-01",
            azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
        )
        # NOTE(review): the Azure deployment name is hard-coded to "gpt-4",
        # so any *model* containing "gpt" hits the same deployment — confirm intentional.
        chat_completion = client.chat.completions.create(
            model="gpt-4",
            temperature=0,
            messages = [
                {
                    "role":"user",
                    "content":prompt
                }])
        # Optionally log the prompt/response pair to Comet when tracing is enabled.
        if prompt_tracing.tracking:
            comet_llm.log_prompt(
                prompt=prompt_tracing.user + ":\n" + prompt,
                output=chat_completion.choices[0].message.content,
            )
        return chat_completion.choices[0].message.content
    elif model == "llama3-70b-8192":
        # Groq backend; temperature 0 for deterministic completions.
        llm = Groq(model="llama3-70b-8192", api_key=os.environ.get("GROQ_API_KEY"), temperature=0)
        response = llm.complete(prompt)
        return response.text
    elif model == "mistral-large-azure":
        # Mistral Large served behind an Azure AI endpoint; the Azure-hosted
        # Mistral API expects the fixed model id "azureai".
        client = MistralClient(
            endpoint=os.environ.get("AZURE_AI_MISTRAL_LARGE_ENDPOINT"), api_key=os.environ.get("AZURE_AI_MISTRAL_LARGE_KEY")
        )
        response = client.chat(
            model="azureai",
            messages=[
                ChatMessage(
                    role="user",
                    content=prompt,
                )
            ],
            max_tokens=4096,
            temperature=0,
        )
        # Token-usage stats printed to stdout for ad-hoc cost monitoring.
        print(response.usage)
        return response.choices[0].message.content
    else:
        # Unknown model id: return a sentinel string rather than raising.
        return "no model available"