import json
import os

from together import Together


def rankerAgent(prompt, model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"):
    """Ask an LLM to pick the best model/answer pair from a prompt containing candidate answers."""
    # Load the API key from the TOGETHER_AI environment variable
    together_ai_key = os.getenv("TOGETHER_AI")
    if not together_ai_key:
        raise ValueError("TOGETHER_AI environment variable not found. Please set it before running the script.")
    # Initialize the Together client
    client = Together(api_key=together_ai_key)
    prompt_text = f"""Input JSON:
{json.dumps(prompt, indent=4)}
For the above question, identify which model gave the best response based on accuracy. Ensure the chosen response is an answer and not a follow-up question. The best_answer should be from the best_model only, as given in the above content. Provide the output in the format:
{{
    "best_model": "<model_name>",
    "best_answer": "<answer>"
}}
Just output this JSON and nothing else.
"""
    # Ask the ranking model to choose the best response
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt_text}],
    )
    response_content = response.choices[0].message.content
    # Follow-up prompts to extract the best_model and best_answer fields from the ranking output
    prompt_text_extract_bestModel = f"""Content:
{response_content}
What's the best_model from above?
"""
    prompt_text_extract_bestAnswer = f"""Content:
{response_content}
What's the best_answer from above?
"""
    response_bestModel = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt_text_extract_bestModel}],
    )
    response_bestAnswer = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt_text_extract_bestAnswer}],
    )
    return response_bestModel.choices[0].message.content, response_bestAnswer.choices[0].message.content
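

# Minimal usage sketch. The prompt structure below (a "question" plus an "answers"
# mapping of model/source name -> candidate response) is an assumed shape based on
# what rankerAgent serializes into its prompt; adjust it to match your own data.
if __name__ == "__main__":
    example_prompt = {
        "question": "What is the capital of France?",
        "answers": {
            "model_a": "The capital of France is Paris.",
            "model_b": "Could you clarify which country you mean?",
        },
    }
    best_model, best_answer = rankerAgent(example_prompt)
    print("Best model:", best_model)
    print("Best answer:", best_answer)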