import random 
import torch
import argparse
import numpy as np
import os
import pickle

from modules.SegModel import *
from driver.EDUSegmenter import *
from basic.Config import *
from driver.Dataloader import read_corpus

from TrainTest import scripts_evaluate, predict

from datasets import load_dataset
from transformers import AutoModel, AutoTokenizer, AutoConfig, set_seed
from loguru import logger
import tqdm 

from nltk.tokenize import sent_tokenize, word_tokenize
from driver.Dataloader import *

from torch.cuda.amp import autocast as autocast
from torch.cuda.amp.grad_scaler import GradScaler


def preprocess_function(examples):
    """Tokenize a batch of documents for the segmenter.

    For each document in ``examples['text']``: sentence-split (German NLTK
    model), word-tokenize each sentence, then run the module-level
    ``plm_tokenizer`` on the pre-split words without special tokens.

    Returns a dict of per-document lists:
        doc_inputs   — tokenizer encodings, one per sentence
        doc_words    — word lists, one per sentence
        doc_word_ids — subword-to-word index maps, one per sentence
    """
    doc_inputs, doc_words, doc_word_ids = [], [], []

    for text in examples['text']:
        inputs_per_sent, words_per_sent, word_ids_per_sent = [], [], []

        for sentence in sent_tokenize(text, language='german'):
            tokens = word_tokenize(sentence)
            encoding = plm_tokenizer(tokens,  is_split_into_words=True, add_special_tokens=False)

            inputs_per_sent.append(encoding)
            words_per_sent.append(tokens)
            word_ids_per_sent.append(encoding.word_ids())

        doc_inputs.append(inputs_per_sent)
        doc_words.append(words_per_sent)
        doc_word_ids.append(word_ids_per_sent)

    return {
        'doc_inputs': doc_inputs,
        'doc_words': doc_words,
        'doc_word_ids': doc_word_ids,
    }

def collect(examples):
    """Pad a batch of tokenized sentences into model-ready tensors.

    Args:
        examples: list of dict-like encodings, each providing an
            ``'input_ids'`` sequence of integer token ids.

    Returns:
        dict with right-padded LongTensor ``'input_ids'``, a matching
        ``'attention_mask'`` (1 for real tokens, 0 for padding), and
        ``'output_hidden_states': True`` for the PLM forward call.
    """
    batch_size = len(examples)
    max_length = max(len(example['input_ids']) for example in examples)

    input_ids = np.zeros([batch_size, max_length], dtype=np.longlong)
    attention_mask = np.zeros([batch_size, max_length], dtype=np.longlong)

    # BUG FIX: the original iterated over the caller's global `onebatch`
    # instead of the `examples` parameter — it only worked because the
    # main loop happened to name its batch variable `onebatch`.
    for idx, example in enumerate(examples):
        for idy, token_id in enumerate(example['input_ids']):
            input_ids[idx, idy] = token_id
            attention_mask[idx, idy] = 1

    return {
        "input_ids": torch.from_numpy(input_ids),
        "attention_mask": torch.from_numpy(attention_mask),
        "output_hidden_states": True,
    }

def filter_long_sent(example):
    """Keep a document only if every sentence encoding stays under the
    512-subword PLM limit."""
    return all(
        len(sent_input['input_ids']) < 512
        for sent_input in example['doc_inputs']
    )
    

if __name__ == '__main__':
    # Log process identifiers so the run can be traced under a job scheduler.
    logger.info("Process ID {}, Process Parent ID {}".format(os.getpid(), os.getppid()))

    set_seed(666)  # fixed seed for reproducibility across python/numpy/torch

    # Report GPU/CuDNN availability before deciding on device placement.
    gpu = torch.cuda.is_available()
    print("GPU available: ", gpu)
    print("CuDNN: \n", torch.backends.cudnn.enabled)

    # Command-line options; unknown args are forwarded to Configurable as overrides.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='examples/default.cfg')
    argparser.add_argument('--model', default='BaseParser')
    argparser.add_argument('--thread', default=4, type=int, help='thread num')
    argparser.add_argument('--lang', default='frr')
    argparser.add_argument('--use-cuda', action='store_true', default=True)
    argparser.add_argument('--num', default=15000, type=int, help='doc num')


    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)

    # Use CUDA only when both requested and actually available.
    config.use_cuda = False
    if gpu and args.use_cuda: config.use_cuda = True

    logger.info(f"GPU using status:\t{config.use_cuda}")
    logger.info(f'Load pretrained plm: {config.plm_load_dir}')

    # Load the pretrained encoder, its config, and tokenizer.
    # plm_tokenizer is a module-level global read by preprocess_function.
    plm_model = AutoModel.from_pretrained(config.plm_load_dir)
    plm_config = AutoConfig.from_pretrained(config.plm_load_dir)
    plm_tokenizer = AutoTokenizer.from_pretrained(config.plm_load_dir, trust_remote_code=True)

    logger.info(f'Load pretrained plm ok')

    # NOTE(review): pickle.load on the vocab file — assumes a trusted artifact.
    vocab = pickle.load(open(config.load_vocab_path, 'rb'))
    seg_model = SegModel(plm_model, plm_config, vocab)

    # Restore only the decoder/output-head weights ('dec'); the PLM keeps
    # its pretrained weights loaded above.
    seg_model.output_layer.load_state_dict(torch.load(config.load_model_path)['dec'])
    if config.use_cuda:
        seg_model.cuda()

    segmenter = EDUSegmenter(seg_model, config)
    
    # Fetch the Wikipedia dump for the chosen language; keep only short
    # documents (< 4096 chars) and over-sample 4x because a later filter
    # will discard more documents.
    logger.info(f'loading dataset')
    dataset = load_dataset('wikipedia', '20220301.' + args.lang,
                        beam_runner='DirectRunner', keep_in_memory=True, num_proc=args.thread)['train']
    dataset = dataset.filter(lambda example: len(example['text']) < 4096, num_proc=args.thread).select(range(args.num * 4))

    logger.info(f"dataest {dataset}")

    # Sentence-split, word-tokenize, and subword-tokenize every document.
    dataset = dataset.map(
        function=preprocess_function,
        batched=True,
        num_proc=args.thread,
        keep_in_memory=True,
        desc="Running tokenizer on train dataset"
    )
    
    # Drop documents containing any sentence >= 512 subwords (PLM length limit),
    # then keep exactly args.num documents.
    dataset = dataset.filter( filter_long_sent, keep_in_memory=True, num_proc=args.thread).select(range(args.num))

    logger.info(f"dataest {dataset}")

    out_path = 'output.edus.' + args.lang

    # Predict EDU boundary labels sentence-by-sentence and write the result
    # in a CoNLL-style tab-separated format, one document at a time.
    with open(out_path, encoding='utf8', mode='w') as outf:
        for doc_example in tqdm.tqdm(dataset):
            offset = 0       # index of the current sentence within this document
            all_labels = []  # word-level label lists, one per sentence

            for onebatch in data_iter(doc_example['doc_inputs'], config.test_batch_size, False):
                inputs = collect(onebatch)
                with autocast():  # mixed-precision inference
                    segmenter.forward(inputs)
                
                # NOTE(review): assumes segmenter.forward() stores per-token
                # logits in segmenter.logits — confirm against EDUSegmenter.
                predict_label_ids = np.argmax(segmenter.logits.cpu().detach().numpy(), axis=-1)
                batch_size = len(onebatch)
                batch_labels = []
                for idx in range(batch_size):
                    word_ids = doc_example['doc_word_ids'][offset]
                    label_ids = []
                    # Keep only the prediction for the first subword of each word.
                    pre_word_index = -1
                    for idy, word_index in enumerate(word_ids):
                        if word_index != pre_word_index:
                            label_ids.append( predict_label_ids[idx, idy])
                        pre_word_index = word_index

                    labels = vocab.id2label(label_ids)
                    batch_labels.append(labels)
                    offset += 1
                all_labels += batch_labels
            
            sent_size = len(doc_example['doc_words'])

            # Skip any document where a sentence's label count does not match
            # its word count (defensive consistency check).
            filter_flag = False
            for idx in range(sent_size):
                words = doc_example['doc_words'][idx]
                labels = all_labels[idx]
                if len(words) != len(labels):
                    filter_flag = True
                    break

            if not filter_flag:
                for idx in range(sent_size):
                    words = doc_example['doc_words'][idx]
                    labels = all_labels[idx]
                    if idx == 0:
                        # Document header built from the Wikipedia record metadata.
                        outf.write("# newdoc id = " + doc_example['id'] + "### title = " + doc_example['title'] + "### url = " + doc_example['url'] + "\n")

                    assert len(words) == len(labels)
                    word_len = len(words)
                    # One token per line: 1-based index, word, eight placeholder
                    # columns, then the segment-boundary label.
                    for idy in range(word_len):
                        if labels[idy] == "b":
                            label = "BeginSeg=Yes"
                        else:
                            label = "_"
                        info = [str(idy + 1), words[idy], "_" , "_", "_", "_", "_", "_", "_", label]
                        line = "\t".join(info)
                        outf.write(line + "\n")
                    outf.write("\n")