import os
import time
import numpy as np
import pandas as pd
import json

import mindspore as ms
from mindspore.train import Model
from mindspore import load_checkpoint, load_param_into_net
from mindspore.parallel import set_algo_parameters
from mindspore.parallel._cost_model_context import _set_multi_subgraphs

from mindformers import pipeline
from mindformers import LlamaForCausalLM, LlamaConfig, AutoTokenizer, LlamaTokenizer
from mindformers import init_context
from mindformers.modules import TransformerOpParallelConfig
from mindformers.trainer.utils import get_last_checkpoint
from mindformers.tools import logger

from llama_model import CevalLlama

# Maximum sequence length: used both in the model config below and as the
# dummy input length when compiling the sharded graph in chat().
SEQ_LENGTH = 2048
# Directory of per-rank sharded checkpoints (expects rank_{id}/ subdirs);
# an empty string disables checkpoint loading in chat().
DISTRIBUTED_CKPT_PATH = os.getenv("DISTRIBUTED_CKPT_PATH", "")
# DISTRIBUTED_CKPT_PATH = "/home/ma-user/work/llama_7b.ckpt"


# set context: Ascend target, mode 0 (presumably GRAPH_MODE — confirm against
# the mindformers init_context convention), parallel_mode 1 with full_batch
# for distributed inference across all visible devices.
context_config = {"device_target": "Ascend", "mode": 0,  "max_device_memory": "31GB"}
parallel_context_config = {"parallel_mode": 1, "gradients_mean": False, "full_batch": True}
rank_id, device_num = init_context(use_parallel=True, context_config=context_config, parallel_config=parallel_context_config)
set_algo_parameters(elementwise_op_strategy_follow=True, fully_use_devices=True)
_set_multi_subgraphs()

# Multiple-choice option labels used when formatting C-Eval questions.
choices = ["A", "B", "C", "D"]

def format_example(line, include_answer=False, cot=False, choice_labels=None):
    """Format one C-Eval row into a multiple-choice prompt string.

    Args:
        line: mapping (e.g. a pandas row) with key 'question', one key per
            choice label, and — when include_answer is True — 'answer'
            (plus 'explanation' when cot is True).
        include_answer: append the gold answer, producing a few-shot exemplar.
        cot: use the chain-of-thought Chinese phrasing.
        choice_labels: iterable of option labels; defaults to the
            module-level `choices` ("A".."D"). New keyword-only-by-position
            parameter; existing callers are unaffected.

    Returns:
        The formatted prompt string.
    """
    labels = choices if choice_labels is None else choice_labels
    # Build with a list + join instead of repeated string concatenation.
    parts = [line['question']]
    for label in labels:
        parts.append(f'\n{label}. {line[label]}')
    if include_answer:
        if cot:
            parts.append("\n答案：让我们一步一步思考，\n"
                         + line["explanation"]
                         + f"\n所以答案是{line['answer']}。\n\n")
        else:
            parts.append('\n答案：' + line["answer"] + '\n\n')
    else:
        parts.append("\n答案：让我们一步一步思考，\n1." if cot else '\n答案：')
    return ''.join(parts)

# Load the C-Eval subject metadata. Explicit UTF-8: the file contains Chinese
# subject names, and the locale-default encoding would break on non-UTF-8
# systems. NOTE(review): usage below only shows subject_mapping[subject][1]
# is the Chinese display name — confirm the full schema against the file.
subject_mapping_path = 'subject_mapping.json'
with open(subject_mapping_path, 'r', encoding='utf-8') as file:
    subject_mapping = json.load(file)


def generate_few_shot_prompt(subject, dev_df, cot=False, k=-1):
    """Build the few-shot preamble for one C-Eval subject.

    Prepends the subject-specific Chinese instruction line, then appends the
    first k rows of dev_df as answered exemplars (all rows when k == -1).
    """
    n_shots = dev_df.shape[0] if k == -1 else k
    header = f"以下是中国关于{subject_mapping[subject][1]}考试的单项选择题，请选出其中的正确答案。\n\n"
    exemplars = [
        format_example(dev_df.iloc[shot, :], include_answer=True, cot=cot)
        for shot in range(n_shots)
    ]
    return header + ''.join(exemplars)


# Llama model configuration. (An earlier comment here said "bloom 7.1b" —
# a copy-paste leftover: hidden_size 8192 / 80 layers / 64 heads with 8 KV
# heads match a ~70B llama-style model; vocab_size 50000 suggests a custom
# tokenizer — TODO confirm against the checkpoint/tokenizer.)
config = LlamaConfig(
    embedding_init_type="float32",
    checkpoint_name_or_path="",  # weights are loaded manually in chat()
    seq_length=SEQ_LENGTH,
    hidden_size=8192,
    num_layers=80,
    num_heads=64,
    n_kv_heads=8,  # grouped-query attention
    vocab_size=50000,
    multiple_of=256,
    pad_token_id=50000,  # NOTE(review): equals vocab_size — one past the last valid id; confirm intended
    max_decode_length=1024,
    ffn_dim_multiplier=1.3,
    hidden_dropout_rate=0.0,
    attention_dropout_rate=0.0,
    use_past=True,  # incremental (KV-cache) decoding
    top_k=3,
    top_p=1,
    do_sample=True,
    temperature=0.8,
    # 8-way tensor (model) parallelism, no data/pipeline parallelism.
    parallel_config=TransformerOpParallelConfig(
        data_parallel=1,
        model_parallel=8,
        pipeline_stage=1,
        vocab_emb_dp=True
        )
    )


# Subject list: one C-Eval subject name per line. Explicit UTF-8 avoids
# locale-dependent decode failures; stripping per line replaces the previous
# readlines() + second strip pass.
with open("subjects.txt", "r", encoding="utf-8") as file:
    subjects = [subject_name.strip() for subject_name in file]
# Token ids whose first-step probabilities are compared to pick an answer.
# Presumably two tokenizations per letter (e.g. with/without leading space)
# under the Llama tokenizer — TODO confirm against tokenizer.model.
index = np.array([319, 350, 315, 360, 29909, 29933, 29907, 29928])
# argmax over `index` is taken mod 4, folding both id variants of each
# letter onto the same label.
mapping = {0: "A", 1: "B", 2: "C", 3: "D"}

def chat():
    """Run the C-Eval multiple-choice evaluation loop.

    For each subject in `subjects`: builds a few-shot prompt from the dev
    split, then for every val row generates with the model and predicts the
    choice letter whose first-step token probability is highest, printing a
    per-subject accuracy at the end.
    """
    # init llama tokenizer and eval wrapper ("bloom" comments in this file
    # were leftovers from an earlier script)
    tokenizer = LlamaTokenizer("/home/ma-user/work/chat_old/tokenizer.model")
    # llama = LlamaForCausalLM(config)
    llama = CevalLlama(config)
    llama.set_train(False)  # inference mode
    print(llama.config)
    print("*********************************************")
    print(llama.lm_head.weight.shape)
    print("*********************************************")
    print(llama.config.parallel_config.vocab_emb_dp)
    if DISTRIBUTED_CKPT_PATH:
        # find the sharded ckpt path for this rank
        ckpt_path = os.path.join(DISTRIBUTED_CKPT_PATH, "rank_{}".format(rank_id))
        ckpt_path = get_last_checkpoint(ckpt_path)
        logger.info("ckpt path: %s", str(ckpt_path))

        # shard the model and load this rank's checkpoint slice: compiling
        # with a dummy (1, SEQ_LENGTH) int32 input materialises the sharded
        # parameter shapes before load_param_into_net.
        #m = Model(llama)
        #m.infer_predict_layout(ms.Tensor(np.ones(shape=(1, SEQ_LENGTH)), ms.int32))
        infer_data=(ms.Tensor(np.ones(shape=(1, SEQ_LENGTH)), ms.int32),)
        llama.set_auto_parallel()
        llama.compile(*infer_data)
        print(llama.lm_head.weight.shape)
        print("*******************************************")
        checkpoint_dict = load_checkpoint(ckpt_path)
        not_load_network_params = load_param_into_net(llama, checkpoint_dict)
        logger.info("Network parameters are not loaded: %s", str(not_load_network_params))

    # question_list = [
    #     "This is my motivation letter to apply the master course of Global Business in the college:",
    #     "大型网站建设最关心的问题就是网站速度",
    #     "糖渍板栗是一种以新鲜板栗",
    #     "呼伦贝尔是全世界最大的市",
    #     "我是练习时长两年半的个人练习生"
    #     ]
    # subject_name = "operating_system"
    # dev_file_path = os.path.join('data/dev', f'{subject_name}_dev.csv')
    # dev_df = pd.read_csv(dev_file_path)

    # for idx, question in enumerate(question_list):

    for subject_name in subjects:
        # dev split supplies the few-shot exemplars, val split is scored
        dev_file_path = os.path.join('ceval/dev', f'{subject_name}_dev.csv')
        dev_df = pd.read_csv(dev_file_path)
        val_file_path = os.path.join('ceval/val', f'{subject_name}_val.csv')
        val_df = pd.read_csv(val_file_path)
        few_shot_prompt = generate_few_shot_prompt(subject_name, dev_df, cot=False)

        correct = 0
        total = 0
        for idx, row in val_df.iterrows():
            t1=time.time()
            question = format_example(row)
            full_prompt = few_shot_prompt + question
            inputs = tokenizer.encode(full_prompt)
            inputs = np.array([inputs]).astype(np.int32) # add batch dim
            # outputs = llama.generate(inputs, max_length=512, do_sample=True, eos_token_id=2)
            # NOTE(review): unlike the stock LlamaForCausalLM, CevalLlama's
            # generate is assumed to also return per-step token probability
            # arrays — confirm against llama_model.py
            outputs, probs_lst = llama.generate(inputs, max_length=2048, do_sample=True, eos_token_id=2)
            outputs = outputs[0] # remove batch dim
            # np.save(f"probs_{idx}.npy", probs_lst)
            # pick the letter with the highest first-step probability among
            # the 8 candidate token ids; % 4 folds both id variants of each
            # letter onto one label (see `index` / `mapping` above)
            pred = mapping[np.argmax(probs_lst[0][0, index]) % 4]
            correct += (pred == row['answer'])
            total += 1
            # print(tokenizer.decode(outputs))
            print(f"Subject {subject_name} Idx {idx}: Pred: {pred} <==> Answer: {row['answer']}")
            print("chat time :",time.time()-t1)
        print(f"Correct ratio for subject {subject_name}: {correct}/{total} {correct / total * 100:.1f}%")

if __name__ == "__main__":
    # Entry point: run the full C-Eval evaluation over every subject.
    chat()
