
import csv
import argparse
import numpy as np
import os
import datetime
import json
from tqdm import tqdm
import glob

import torch
from transformers import LlamaTokenizer, LlamaForCausalLM
from transformers import AutoTokenizer,AutoModelForCausalLM
from peft import PeftModel

from graph import ChineseGraph
from ner import MatchExtract
from text import Text

# Command-line configuration for this evaluation run.
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", type=str, default="/home/llm_user/Baichuan2-7B-Chat")
parser.add_argument("--lora_weights", type=str, default="")  # empty string = no LoRA adapter
parser.add_argument("--data_dir", type=str, default="../data")
parser.add_argument("--load_in_8bit", action='store_true')  # load model weights 8-bit quantized
parser.add_argument("--with_conf", action='store_true')  # NOTE(review): parsed but never read in this file
parser.add_argument("--with_rag", action='store_true')  # build retrieval-augmented prompts
args = parser.parse_args()

# Retrieval components (project-local classes): entity matcher, knowledge
# graph, and text-passage index, all built over the same entity vocabulary.
m = MatchExtract('/home/llm_user/index/medical/llm-retrieval-qa/data/entities-v2.txt')
g = ChineseGraph('/home/llm_user/index/medical/llm-retrieval-qa/data/triples-v2.txt', m)
t = Text('/home/llm_user/index/medical/llm-retrieval-qa/data/text', m)

# Use Llama-specific classes when the checkpoint path looks like a Llama
# model; otherwise fall back to the Auto* factory classes.
tokenizer_class = LlamaTokenizer if 'llama' in args.model_name_or_path else AutoTokenizer
model_class = LlamaForCausalLM if 'llama' in args.model_name_or_path else AutoModelForCausalLM
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, trust_remote_code=True)
model = model_class.from_pretrained(args.model_name_or_path,
                                    trust_remote_code=True,
                                    load_in_8bit=args.load_in_8bit,
                                    device_map="auto"
                                    )

# Optionally wrap the base model with LoRA adapter weights.
if args.lora_weights != "":
    model = PeftModel.from_pretrained(
                    model,
                    args.lora_weights,
                    torch_dtype=torch.float16,
                    )
model.eval()
# Answer letters and their token ids; used to score single-choice questions
# by comparing next-token logits over exactly these five tokens.
choices = ["A", "B", "C", "D", "E"]
choice_ids = [tokenizer.convert_tokens_to_ids(choice) for choice in choices]

def softmax(x):
    """Return the softmax of a 1-D array (or sequence) of logits.

    The maximum is subtracted before exponentiating for numerical
    stability (avoids overflow on large logits). Accepts any sequence
    convertible by ``np.asarray``; always returns an ``np.ndarray``.
    """
    x = np.asarray(x, dtype=np.float64)  # accept plain lists, not just ndarrays
    z = x - np.max(x)  # shift so the largest exponent is exp(0) == 1
    exp_z = np.exp(z)
    return exp_z / np.sum(exp_z)

def gen_rag_prompt(question, question_type):
    """Build a retrieval-augmented prompt: passages retrieved for the
    entities found in the question are prepended as known context,
    followed by the exam question itself."""
    matched = m.find_entities(question)
    # knowledge_g = g.prompt(matched)  # graph-derived knowledge currently disabled
    passages = t.prompt(matched, question)
    instruction = f'\n\n以下是关于中医的{question_type}，请直接给出正确答案的选项。\n\n'
    return '已知:' + passages + instruction + question + '\n答案是：'

def gen_prompt(question, question_type):
    """Build a plain (no-retrieval) prompt asking the model to pick the
    correct option for a TCM exam question."""
    instruction = f'以下是关于中医的{question_type}，请直接给出正确答案的选项。\n\n'
    return instruction + question + '\n答案是：'


# Load worked one-shot examples: each entry is a full prompt followed by
# its gold answer, to be prepended for few-shot evaluation.
n_shots = []
with open('data/test/medical-1shot.csv', 'r', encoding='utf-8') as f:
    reader = csv.reader(f)
    next(reader)  # skip header row
    for row in reader:
        # Columns 1..6 hold the question stem and options; column 7 the answer.
        shot_question = '\n'.join(row[1:7])
        n_shots.append(gen_prompt(shot_question, '单项选择题') + row[7])

def write_ans(output_dir, shot_num, line, predict):
    """Append one prediction row to the per-shot-count answer CSV.

    Parameters
    ----------
    output_dir : str
        Directory holding this run's outputs; created (with parents) if missing.
    shot_num : int
        Number of few-shot examples used; selects ``medical--{shot_num}.csv``.
    line : list
        The original test CSV row for the question.
    predict : str
        Predicted answer, appended as the last column.
    """
    # makedirs (not mkdir) so nested run dirs like 'output/<timestamp>' work,
    # and exist_ok avoids racing between the existence check and creation.
    os.makedirs(output_dir, exist_ok=True)
    # newline='' per the csv module docs, so rows aren't double-spaced on Windows.
    with open(f"{output_dir}/medical--{shot_num}.csv", 'a', encoding='utf-8', newline='') as f:
        csv.writer(f).writerow(line + [predict])

def write_submit(output_dir):
    """Convert every per-shot answer CSV in *output_dir* into a submission JSON.

    Each ``*.csv`` becomes a sibling ``*.json`` containing one
    ``{'id': ..., 'model_answer': ...}`` entry per question id 1..11200.
    Questions absent from a CSV default to answer 'A'.
    """
    os.makedirs(output_dir, exist_ok=True)  # handles nested run dirs safely
    for csv_file in glob.glob(f"{output_dir}/*.csv"):
        # Fresh default table PER FILE: the previous code reused a single
        # accumulator across all CSVs, so answers from one shot-count could
        # leak into another file's submission when a CSV was incomplete.
        ans = [{'id': _id, 'model_answer': 'A'} for _id in range(1, 11201)]
        with open(csv_file, 'r', encoding='utf-8') as f:
            for line in csv.reader(f):
                ans[int(line[0]) - 1]['model_answer'] = line[-1]
        with open(csv_file.replace('.csv', '.json'), 'w', encoding='utf-8') as f:
            json.dump(ans, f, ensure_ascii=False, indent=4)

def write_cfg(output_dir, args):
    """Dump the argparse namespace to ``{output_dir}/config.json``.

    Recorded for reproducibility of the run. The directory (including
    parents, e.g. 'output/<timestamp>') is created if missing — the old
    ``os.mkdir`` raised FileNotFoundError when 'output/' did not exist yet.
    """
    os.makedirs(output_dir, exist_ok=True)
    with open(f"{output_dir}/config.json", 'w', encoding='utf-8') as f:
        json.dump(vars(args), f, ensure_ascii=False, indent=4)

# Per-shot-count buckets. NOTE(review): `cors` is never populated or read
# anywhere in this file — looks like leftover accuracy bookkeeping.
cors = {i: [] for i in range(len(n_shots) + 1)}
# Timestamped output directory so each run's CSVs/JSONs are kept separate.
current_datetime = datetime.datetime.now()
formatted_datetime = current_datetime.strftime("%Y-%m-%d-%H-%M-%S")
# formatted_datetime = '2024-03-11-15-22-18'
output_dir = f'output/{formatted_datetime}'
write_cfg(output_dir,args)

# Main evaluation loop: for every test question, produce one prediction per
# few-shot count (0..len(n_shots)) and append it to the matching answer CSV.
with open('data/test/medical-2.csv','r',encoding='utf-8') as f:
    data = csv.reader(f)
    next(data)  # skip header row
    for line in tqdm(data):
        # Columns 1..6: question stem + options; column 7: question-type label.
        question = '\n'.join(line[1:7])
        question_type = line[7]
        if args.with_rag:
            prompt = gen_rag_prompt(question, question_type)
        else:
            prompt = gen_prompt(question, question_type)

        # print(prompt)
        for shot_num in range(len(n_shots) + 1):
            inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
            if "token_type_ids" in inputs: # For Falcon
                inputs.pop("token_type_ids")
            
            if '单项选择题' == question_type:
                # Single-choice question: pick the answer letter whose token
                # has the highest next-token logit — no text generation needed.
                with torch.no_grad():
                    outputs = model(**inputs)
                    last_token_logits = outputs.logits[:, -1, :]
                    choice_logits = last_token_logits[:, choice_ids].detach().cpu().numpy()
                    pred = {0: "A", 1: "B", 2: "C", 3: "D",4: "E"}[np.argmax(choice_logits[0])]
            else:
                # Other question types: generate a short continuation and keep
                # every answer letter that appears in it (always in A..E order).
                output = model.generate(inputs['input_ids'],max_new_tokens=10)[0,inputs['input_ids'].shape[1]:]
                output = tokenizer.decode(output)
                pred = ''.join([c for c in choices if c in output])

            write_ans(output_dir,shot_num,line,pred)
            if shot_num < len(n_shots):
                # Prepend the next worked example so iteration k+1 scores a
                # (k+1)-shot prompt for the same question.
                prompt = n_shots[shot_num] + '\n' + prompt

    write_submit(output_dir)
