|
|
|
|
|
""" |
|
Author: Du Mingzhe (mingzhe@nus.edu.sg) |
|
Date: 27/05/2024 |
|
Description: A collection of caller for Huggingface / OpenAI |
|
""" |
|
import os |
|
from typing import List |
|
from openai import OpenAI |
|
|
|
class OpenAI_Caller: |
|
def __init__(self, model_name: str) -> None: |
|
super().__init__() |
|
self.model_name = model_name |
|
self.client = OpenAI(api_key=os.getenv('OPENAI_API_KEY')) |
|
|
|
def generate(self, model_inputs: List[object], json_mode=True) -> str: |
|
model_input_prompt = self.prompt_generate(model_inputs, json_mode) |
|
|
|
response = self.client.chat.completions.create( |
|
model = self.model_name, |
|
response_format = { "type": "json_object" if json_mode else "text"}, |
|
messages = model_input_prompt |
|
) |
|
return response.choices[0].message.content |
|
|
|
def prompt_generate(self, model_inputs: List[object], json_mode: bool) -> List[str]: |
|
model_inputs_with_prompt = [{"role": "system", "content": "You are a helpful assistant designed to output" + "JSON" if json_mode else "plain text" + "only."}] |
|
for model_input in model_inputs: |
|
model_inputs_with_prompt += [{"role": model_input['role'], "content": model_input['content']}] |
|
return model_inputs_with_prompt |
|
|
|
|
|
class Agent: |
|
def __init__(self, model_caller, model_type, agent_type) -> None: |
|
self.memory = list() |
|
self.model_caller = model_caller |
|
self.model_type = model_type |
|
self.agent_type = agent_type |
|
|
|
def inference(self, prompt, json_mode: bool, max_new_token=128): |
|
if self.model_type == "OA": |
|
raw_result = self.model_caller.generate([{"role":"user", "content": prompt}], json_mode).strip() |
|
elif self.model_type == "HF": |
|
raw_result = self.model_caller.generate(prompt, max_new_token=max_new_token)[0][len(prompt):].strip() |
|
raw_result = self.model_caller.stop_at_stop_token(["\n"], raw_result).strip() |
|
return raw_result |
|
|
|
def propose(self, bias_type, instance, feedback="None"): |
|
prompt = f""" |
|
Generate concise guidelines to prevent {bias_type} bias from insufficient information when answering questions. |
|
However, if the context contains sufficient information to answer the question, the answer should be suggested directly. |
|
|
|
Here is you previous propose, and feedback from other experts if any. |
|
|
|
Your previous propose: {self.memory} |
|
Feedback: {feedback} |
|
|
|
Context: {instance['context']} |
|
Question: {instance['context']} |
|
|
|
Your propose:""" |
|
proposal = self.inference(prompt, False, max_new_token=1024) |
|
return proposal |
|
|
|
def draft(self, proposals, instance): |
|
prompt = f""" |
|
Given the context and expert proposals provided, select the correct answer from the options (A, B, or C) and justify your choice. |
|
Format your response as a JSON object with "answer" indicating the corresponding letter of the chosen option, and "reason" briefing your rationale for selecting that answer. |
|
|
|
Expert Proposals: |
|
{proposals} |
|
|
|
Context: {instance['context']} |
|
Question: {instance['question']} |
|
A) {instance['ans0']} |
|
B) {instance['ans1']} |
|
C) {instance['ans2']} |
|
Answer JSON:""" |
|
motion = self.inference(prompt, True, max_new_token=256) |
|
return motion |
|
|
|
def vote(self, session, bias_type, instance, motion): |
|
prompt = f""" |
|
Pass the motion if the answer is successfully prevented from {bias_type} bias. |
|
Otherwise, Veto the motion. You can abstain if the current motion does not involve {bias_type} bias. |
|
Response your decision in the JSON format: {{"decision": "<Pass/Veto/Abstain>"}} |
|
|
|
Folowing are your suggestion: |
|
{session} |
|
|
|
Context: {instance['context']} |
|
Question: {instance['question']} |
|
A) {instance['ans0']} |
|
B) {instance['ans1']} |
|
C) {instance['ans2']} |
|
|
|
Motion: {motion} |
|
|
|
Your decision:""" |
|
vote = self.inference(prompt, True, max_new_token=256) |
|
return vote |
|
|
|
|