import os, sys
import re
import time

import numpy as np
sys.path.append(os.getcwd())
print("sys append", os.getcwd())
from src.tokenization_jieba import JIEBATokenizer
from mindspore.train.callback import Callback
from mindspore import save_checkpoint
from mindspore.nn.metrics import Metric
from mindspore._checkparam import Validator as validator
from mycode.cmrc2018_evaluate import calc_f1_score, calc_em_score
from src.generate import generate, generate_increment
import mindspore.dataset as ds
# custom callback function
# cb_params.batch_num = sink_size = 2
def eval_gener(ds_eval, model, args_opt, tokenizer, Metric_myf1, mark=44, eod_id=6):
    """Run generative evaluation over ``ds_eval`` and return (F1, Em).

    Each sample is split at the single occurrence of the ``mark`` token:
    everything up to and including the mark is the prompt, and the tokens
    from ``mark + 3`` onward (two tokens after the mark are skipped —
    presumably separator tokens, TODO confirm against the data pipeline)
    form the reference answer.  The model generates a continuation of the
    prompt; the generated span is decoded and scored with ``Metric_myf1``.

    Args:
        ds_eval: MindSpore dataset yielding dicts with an "input_ids"
            Tensor; assumed batch size 1 (only element [0] is used).
        model: model forwarded to ``generate``.
        args_opt: generation options forwarded to ``generate``.
        tokenizer: tokenizer providing ``convert_ids_to_tokens``.
        Metric_myf1: metric with ``clear``/``update``/``eval`` (see MyF1).
        mark (int): token id separating prompt from answer (default 44).
        eod_id (int): end-of-document token id used to truncate the
            reference answer (default 6).

    Returns:
        tuple: (f1_score, em_score) from ``Metric_myf1.eval()``.
    """
    Metric_myf1.clear()
    print("eval Dataset size: {}".format(ds_eval.get_dataset_size()), flush=True)
    cnt = 0
    ds.config.set_seed(cnt)
    for data in ds_eval.create_dict_iterator():
        start_time = time.time()
        temp = data["input_ids"].asnumpy()[0]
        mark_position = np.where(temp == mark)[0]
        # Skip samples that do not contain exactly one mark token: the
        # prompt/answer boundary would be ambiguous.
        if mark_position.size != 1:
            continue
        mark_position = mark_position[0]
        input_ids = (temp[:mark_position + 1]).tolist()  # prompt, shape (-1,)
        labels_ids = temp[mark_position + 3:]
        # Truncate the reference answer at the first end-of-document token.
        eod_positions = np.where(labels_ids == eod_id)[0]
        if eod_positions.size > 0:
            labels_ids = labels_ids[:eod_positions[0]]
        # Strip trailing padding (id 0).
        length = np.sum(labels_ids != 0)
        labels_ids = (labels_ids[:length]).tolist()  # reference, shape (-1,)
        # Allow up to twice the reference length for the generated answer.
        output_ids = generate(model, np.array(input_ids).reshape(1, -1), args_opt, 2 * length)
        # Keep only the generated continuation (drop the echoed prompt).
        # (The original also had an unreachable "full sequence" else-branch
        # behind `if True:`; it has been removed.)
        output_ids = (output_ids[mark_position + 1:]).tolist()
        # Decode ids to token strings for scoring.
        output_ids = tokenizer.convert_ids_to_tokens(output_ids)
        labels_ids = tokenizer.convert_ids_to_tokens(labels_ids)
        result_temp2 = Metric_myf1.update(output_ids, labels_ids)
        eval_time = int(time.time() - start_time)
        # Log every 10th step (offset 5) and any step with a non-zero F1.
        if cnt % 10 == 5 or result_temp2[0] > 0:
            print("No.{}, step eval_time: {}s, result(F1,Em): {}".format(cnt, eval_time, result_temp2))
        cnt += 1
        if cnt >= 100:  # TODO: evaluation capped at 100 usable samples
            break
    return Metric_myf1.eval()

class EvalAndSaveCallback(Callback):
    """Callback that periodically evaluates the model and checkpoints the best F1.

    Every ``steps`` training steps it runs generative evaluation via
    ``eval_gener`` and, when the F1 score ties or beats the best seen so
    far, saves the training network into ``save_dir``.

    Args:
        steps (int): evaluate every this many training steps.
        eval_model: model forwarded to ``eval_gener``.
        ds_eval: evaluation dataset forwarded to ``eval_gener``.
        save_dir (str): directory where best checkpoints are written.
        has_trained_epoch (int): epochs already trained (bookkeeping only).
        has_trained_step (int): steps already trained (bookkeeping only).
        myf1: metric with ``clear``/``update``/``eval`` (see ``MyF1``).
        myem: unused; kept for interface compatibility.
        args_opt: generation options forwarded to ``eval_gener``.
        tokenizer: tokenizer forwarded to ``eval_gener``.
    """

    def __init__(self, steps, eval_model, ds_eval, save_dir='', has_trained_epoch=0, has_trained_step=0,
                 myf1=None, myem=None, args_opt=None, tokenizer=None):
        super(EvalAndSaveCallback, self).__init__()
        self.steps = steps
        self.model = eval_model
        self.ds_eval = ds_eval
        self.F1 = 0  # best F1 observed so far
        self.Em = 0  # best Em observed so far (currently never updated)
        self.directory = save_dir
        self.steps_eval = {"step": [], "F1": [], "Em": []}  # evaluation history
        self.has_trained_epoch = has_trained_epoch
        self.has_trained_step = has_trained_step
        self.Metric_myf1 = myf1
        self.args_opt = args_opt
        self.mark = 44  # prompt/answer separator id; tokenizer.tokenize('...') = [44]
        self.tokenizer = tokenizer
        print("steps_eval", type(self.steps_eval), self.steps_eval)

    def step_end(self, run_context):
        """Run evaluation every ``self.steps`` steps; checkpoint on new best F1."""
        cb_params = run_context.original_args()
        cur_step = cb_params.cur_step_num
        if cur_step % self.steps != 0:
            return
        self.steps_eval["step"].append(cur_step)
        start_time = time.time()
        result_temp = eval_gener(self.ds_eval,
                                 self.model,
                                 self.args_opt,
                                 self.tokenizer,
                                 self.Metric_myf1,
                                 self.mark)
        # eval_gener returns (F1, Em).
        result = {
            "Em": result_temp[1],
            "F1": result_temp[0]
        }
        eval_time = int(time.time() - start_time)
        self.steps_eval["F1"].append(result['F1'])
        print("eval_time: {}s, F1 is {}, Em is {}".format(eval_time, result['F1'], result['Em']))
        # Save a checkpoint whenever the F1 ties or improves on the best so far.
        if result['F1'] >= self.F1:
            self.F1 = result['F1']
            file_name = 'pangu_cmrc2018_F1_' + str(self.F1) + ".ckpt"
            path = os.path.join(self.directory, file_name)
            save_checkpoint(save_obj=cb_params.train_network, ckpt_file_name=path)
            print("Save the maximum accuracy checkpoint,the accuracy is", self.F1)
            print("path", path)

def find_latest_ckpt(data_path, has_trained = False):
    """Locate the newest ``.ckpt`` file in ``data_path`` and parse its progress.

    Checkpoint names are expected to end in ``-<epoch>_<step>.ckpt``; a
    plain un-numbered ``pangu*.ckpt`` name yields epoch/step of 0.  The
    candidates are sorted lexicographically so every machine selects the
    same file.

    Args:
        data_path (str): directory containing checkpoint files.
        has_trained (bool): when False, skip the scan entirely.

    Returns:
        tuple: (ckpt_path or None, has_trained_epoch, has_trained_step).
    """
    if not has_trained:
        return None, 0, 0
    candidates = sorted(
        os.path.join(data_path, fname)
        for fname in os.listdir(data_path)
        if fname.endswith(".ckpt")
    )
    if not candidates:
        return None, 0, 0
    # Keep the ordering deterministic across machines to avoid loss-convergence issues.
    ckpt_path = candidates[-1]
    # "...-<epoch>_<step>.ckpt" -> "<epoch>_<step>" -> [epoch, step]
    tail = os.path.basename(ckpt_path).split('-')[-1].split('.')[0]
    pieces = tail.split('_')
    epoch, step = pieces[0], pieces[-1]
    if epoch == 'pangu':
        # Un-numbered pretrain checkpoint: no recorded progress.
        epoch, step = 0, 0
    return ckpt_path, int(epoch), int(step)

def find_latest_file(data_path, endswith='txt'):
    """Return the lexicographically last file in ``data_path`` matching a suffix.

    Args:
        data_path (str): directory to scan (non-recursive).
        endswith (str): filename suffix to match.  Note this is a plain
            string suffix, not an extension — 'txt' also matches 'footxt'.

    Returns:
        str | None: full path of the last matching file after sorting, or
        None when the directory has no match (previously this raised
        IndexError on an empty result).
    """
    matches = sorted(
        os.path.join(data_path, name)
        for name in os.listdir(data_path)
        if name.endswith(endswith)
    )
    return matches[-1] if matches else None

class MyF1(Metric):
    r"""
    Accumulating F1/Em metric over (prediction, answers) token-string pairs.

    Scoring is delegated to ``calc_f1_score`` / ``calc_em_score`` from the
    CMRC2018 evaluation script; ``update`` receives already-decoded token
    strings.
    """
    def __init__(self):
        super(MyF1, self).__init__()
        self.clear()

    def clear(self): # before each evaluation
        """Reset all accumulators."""
        self.f1 = 0           # running sum of per-sample F1
        self.em = 0           # running sum of per-sample Em
        self.total_count = 0  # number of samples actually scored
        self.skip_count = 0   # number of samples skipped (missing pred/answer)

    def update(self, *inputs): # after each step
        """Score one (prediction, answers) pair and return its (f1, em).

        Args:
            *inputs: exactly two positional values (y_pred, y).

        Returns:
            tuple: (f1, em) for this sample; (0, 0) when the sample is
            skipped because either value is None.

        Raises:
            ValueError: if not given exactly two inputs.
        """
        if len(inputs) != 2:
            raise ValueError('Metric need 2 inputs (y_pred, y), but got {}'.format(len(inputs)))
        prediction, answers = (inputs[0], inputs[1])
        # Fix: use `is None` and return early — the original fell through to
        # `return a, b` with a/b unbound on the skip path (UnboundLocalError).
        if prediction is None or answers is None:
            self.skip_count += 1
            return 0, 0
        self.total_count += 1 # TODO
        a = calc_f1_score([answers], prediction)
        b = calc_em_score([answers], prediction)
        self.f1 += a
        self.em += b
        return a, b

    def eval(self):
        """Return (F1, Em) as percentages over all scored samples.

        Returns (0.0, 0.0) when no sample was scored, instead of raising
        ZeroDivisionError.
        """
        if self.total_count == 0:
            return 0.0, 0.0
        f1_score = 100.0 * self.f1 / self.total_count
        em_score = 100.0 * self.em / self.total_count
        return f1_score, em_score
