
import csv
from transformers import LlamaTokenizer, LlamaForCausalLM
from transformers import AutoTokenizer,AutoModelForCausalLM
import argparse
import torch
import numpy as np
import os
import datetime
import json
from tqdm import tqdm

from graph import ChineseGraph
from ner import MatchExtract
from text import Text

# Command-line configuration for the evaluation run.
parser = argparse.ArgumentParser()
for flag, opts in [
    ("--model_name_or_path", dict(type=str, default="")),
    ("--lora_weights", dict(type=str, default="")),
    ("--data_dir", dict(type=str, default="../data")),
    ("--save_dir", dict(type=str, default="../results/not_specified")),
    ("--max_length", dict(type=int, default=2048)),
    ("--load_in_8bit", dict(action='store_true')),
    ("--with_conf", dict(action='store_true')),
    ("--cot", dict(action='store_true')),
]:
    parser.add_argument(flag, **opts)
args = parser.parse_args()

# Knowledge-retrieval components: entity matcher, knowledge graph, and
# free-text index.  NOTE(review): paths are hard-coded to one machine --
# consider moving them behind --data_dir.
m = MatchExtract('/home/llm_user/index/medical/llm-retrieval-qa/data/entities-v2.txt')
g = ChineseGraph('/home/llm_user/index/medical/llm-retrieval-qa/data/triples-v2.txt', m)
t = Text('/home/llm_user/index/medical/llm-retrieval-qa/data/text', m)

# LLaMA checkpoints need the dedicated classes; everything else goes
# through the Auto* factories.
is_llama = 'llama' in args.model_name_or_path
tokenizer_class = LlamaTokenizer if is_llama else AutoTokenizer
model_class = LlamaForCausalLM if is_llama else AutoModelForCausalLM
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, trust_remote_code=True)
model = model_class.from_pretrained(
    args.model_name_or_path,
    trust_remote_code=True,
    load_in_8bit=args.load_in_8bit,
    device_map="auto",
)

# Answer options and their token ids, used to score next-token logits.
choices = ["A", "B", "C", "D", "E"]
choice_ids = [tokenizer.convert_tokens_to_ids(c) for c in choices]

def softmax(x):
    """Numerically stable softmax over a 1-D array of logits.

    Args:
        x: array-like of raw scores (list or np.ndarray).

    Returns:
        np.ndarray of the same length with non-negative entries summing to 1.
    """
    x = np.asarray(x, dtype=np.float64)
    # Subtract the max before exponentiating to avoid overflow on large logits.
    exp = np.exp(x - np.max(x))
    # Note: do not shadow the function name with a local variable.
    return exp / np.sum(exp)

def gen_prompt(question):
    """Build the retrieval-augmented prompt for one multiple-choice question.

    Entities found in the question are used to pull supporting passages from
    the free-text index `t`; the graph-based source (`g.prompt`) is currently
    not included in the prompt.
    """
    found = m.find_entities(question)
    evidence = t.prompt(found, question)
    header = '已知:' + evidence
    return header + '\n\n以下是关于中医的单项选择题，请直接给出正确答案的选项。\n\n' + question + '\n答案是：'

# Assemble the few-shot exemplars: each entry is a fully rendered prompt
# followed by its gold answer letter (column 7).
with open('data/test/medical-shot.csv', 'r', encoding='utf-8') as f:
    reader = csv.reader(f)
    next(reader)  # skip header row
    n_shots = [gen_prompt('\n'.join(row[1:7])) + row[7] for row in reader]

def write_ans(output_dir, shot_num, line, predict):
    """Append one prediction row to the per-shot results CSV.

    Args:
        output_dir: directory for result files; created (with parents) if missing.
        shot_num: number of few-shot exemplars used for this prediction.
        line: the original CSV row (list of str) for the question.
        predict: predicted answer letter, appended as the last column.
    """
    # makedirs with exist_ok avoids the failure of os.mkdir when the
    # directory already exists or its parent is missing.
    os.makedirs(output_dir, exist_ok=True)
    # newline='' is required by the csv module so the writer does not emit
    # blank lines on Windows.
    with open(f"{output_dir}/medical--{shot_num}.csv", 'a', encoding='utf-8', newline='') as f:
        csv.writer(f).writerow(line + [predict])

def write_cfg(output_dir, args):
    """Dump the run configuration (argparse namespace) to config.json.

    Args:
        output_dir: directory for the config file; created (with parents) if missing.
        args: argparse.Namespace whose attribute values are JSON-serializable.
    """
    # makedirs with exist_ok avoids the failure of os.mkdir when the
    # directory already exists or its parent is missing.
    os.makedirs(output_dir, exist_ok=True)
    with open(f"{output_dir}/config.json", 'w', encoding='utf-8') as f:
        # vars(args) is the idiomatic spelling of args.__dict__.
        json.dump(vars(args), f, ensure_ascii=False, indent=4)

# Accuracy bookkeeping: one list of booleans per shot count (0..len(n_shots)).
cors = {shot: [] for shot in range(len(n_shots) + 1)}

# Timestamped output directory so repeated runs never overwrite each other.
current_datetime = datetime.datetime.now()
formatted_datetime = current_datetime.strftime("%Y-%m-%d-%H-%M-%S")
output_dir = 'output/' + formatted_datetime
write_cfg(output_dir, args)

# Evaluate every test question at 0..len(n_shots) shots.  For each shot
# count we score only the next-token logits of the five option letters
# and take the argmax as the prediction.
with open('data/test/medical.csv', 'r', encoding='utf-8') as f:
    data = csv.reader(f)
    next(data)  # skip header row
    for line in tqdm(data):
        question = '\n'.join(line[1:7])
        label = line[7]
        prompt = gen_prompt(question)
        for shot_num in range(len(n_shots) + 1):
            # NOTE(review): prompts grow as shots are prepended and may
            # exceed the model context; args.max_length is currently not
            # enforced here (naive right-truncation would cut off the
            # question) -- TODO: truncate the retrieved knowledge instead.
            # model.device is where the first parameters live under
            # device_map="auto"; avoids a hard-coded "cuda".
            inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
            if "token_type_ids" in inputs: # For Falcon
                inputs.pop("token_type_ids")

            with torch.no_grad():
                outputs = model(**inputs)
                # Logits at the position that would generate the answer letter.
                last_token_logits = outputs.logits[:, -1, :]
                choice_logits = last_token_logits[:, choice_ids].detach().cpu().numpy()
                # Confidence assigned to the gold label (currently unused
                # downstream; kept for analysis / --with_conf).
                conf = softmax(choice_logits[0])[choices.index(label)]
                # Index into the shared `choices` list instead of a duplicate
                # hard-coded letter map.
                pred = choices[int(np.argmax(choice_logits[0]))]

            cors[shot_num].append(pred == label)
            write_ans(output_dir, shot_num, line, pred)
            # Prepend the next exemplar for the following shot count.
            if shot_num < len(n_shots):
                prompt = n_shots[shot_num] + '\n' + prompt

    for shot_num in cors:
        acc = np.mean(cors[shot_num])
        print("Shot num {} Average accuracy {:.3f}".format(shot_num, acc))
