import numpy as np
import torch
from basic.SlideWindow import chuncking, merge_chuncked_reps
from basic.Doc import *
from basic.Vocab import *

def read_sent(inf):
    """Yield blocks of stripped lines from *inf*, splitting on blank lines.

    A blank line always terminates the current block, so consecutive blank
    lines yield empty blocks. A trailing block with no final blank line is
    also yielded.
    """
    block = []
    for raw in inf:
        text = raw.strip()
        if not text:
            yield block
            block = []
        else:
            block.append(text)
    if block:
        yield block

def read_corpus(file_path, max_edu_num, eval=False):
    """Read a CoNLL-style corpus file and return a list of filtered Doc objects.

    Documents start at a "# newdoc id = <name>" header line; subsequent
    sentence blocks (separated by blank lines) are appended to the current
    document. After ``Doc.extract_conll()``, only documents with
    ``2 <= len(EDUs) < max_edu_num`` are kept.

    Args:
        file_path: path to the corpus file (UTF-8).
        max_edu_num: exclusive upper bound on the number of EDUs per doc.
        eval: suppress the summary printout when True. (Name shadows the
            builtin but is kept for backward compatibility with callers
            using the keyword.)

    Raises:
        ValueError: if a sentence block appears before any document header.
    """
    with open(file_path, mode='r', encoding='utf8') as inf:
        doc_data = []
        doc = None  # guards against files that don't start with a header
        for inst in read_sent(inf):
            if not inst:
                # consecutive blank lines produce empty blocks; skip them
                continue
            if inst[0].startswith("# newdoc id ="):
                # maxsplit=1 keeps names that themselves contain '='
                doc_name = inst[0].split('=', 1)[1].strip()
                doc = Doc()
                doc.firstline = inst[0]
                doc.name = doc_name
                doc.sentences_conll.append(inst[1:])
                doc_data.append(doc)
            elif doc is not None:
                doc.sentences_conll.append(inst)
            else:
                raise ValueError(
                    "Sentence block found before any '# newdoc id =' header in %s" % file_path)

    filter_doc_data = []
    for doc in doc_data:
        doc.extract_conll()
        if 2 <= len(doc.EDUs) < max_edu_num:
            filter_doc_data.append(doc)

    doc_num = len(filter_doc_data)
    sent_num = sum(len(doc.sentences) for doc in filter_doc_data)
    if not eval:
        print("Info: ", file_path)
        print("Doc num: ", doc_num)
        print("Sentence num: ", sent_num)
    return filter_doc_data


def dc_labeling(data, vocab):
    """Attach discourse-connective label ids to every instance in *data*.

    The first token of each EDU is taken as its discourse connective and
    mapped to an id via ``vocab.DC2id``; the resulting list is stored on
    the instance as ``dc_ids`` (one id per EDU).
    """
    for instance in data:
        connectives = []
        for edu in instance.EDUs:
            assert len(edu) > 0
            connectives.append(edu[0])

        instance.dc_ids = [vocab.DC2id(dc) for dc in connectives]

        assert len(instance.dc_ids) == len(instance.EDUs)

def doc_tokenization(data, tokenizer):
    """Tokenize each instance's EDUs and prepare chunked model inputs.

    Per instance this stores:
      * ``inputs`` — dict with chunked ``input_ids`` (plus ``token_type_ids``
        / ``attention_mask`` when the tokenizer declares them in
        ``model_input_names``),
      * ``padded_token_len`` / ``win`` — chunking metadata from ``chuncking``,
      * ``edu_lens`` — subword-token length of each EDU.
    """
    for instance in data:
        edu_lens = []
        doc_tokens = []
        for edu in instance.EDUs:
            tokens = tokenizer.tokenize(" ".join(edu))
            edu_lens.append(len(tokens))
            doc_tokens.extend(tokens)

        doc_input_ids = tokenizer.convert_tokens_to_ids(doc_tokens)
        input_ids, instance.padded_token_len, instance.win = chuncking(doc_input_ids)

        instance.inputs = {"input_ids": input_ids}

        total = len(doc_input_ids)
        if "token_type_ids" in tokenizer.model_input_names:
            # all segment ids are 0; chunk the same way as the input ids
            instance.inputs["token_type_ids"] = chuncking([0] * total)[0]
        if "attention_mask" in tokenizer.model_input_names:
            # every real token gets mask 1; chunk the same way as the input ids
            instance.inputs["attention_mask"] = chuncking([1] * total)[0]

        instance.edu_lens = edu_lens


def batch_slice(data, batch_size):
    """Yield successive slices of *data*, each holding at most *batch_size* items.

    The final slice may be shorter when ``len(data)`` is not a multiple of
    *batch_size*; an empty *data* yields nothing.
    """
    for start in range(0, len(data), batch_size):
        yield data[start:start + batch_size]


def data_iter(data, batch_size, shuffle=True):
    """Yield batches of *data* of size *batch_size*.

    When *shuffle* is true, *data* is permuted in place before batching
    and the resulting batches are then yielded in random order as well.
    """
    if shuffle:
        np.random.shuffle(data)

    batches = list(batch_slice(data, batch_size))

    if shuffle:
        np.random.shuffle(batches)
    yield from batches

def label_variable(onebatch):
    """Build a padded tensor of discourse-connective ids for a batch.

    Args:
        onebatch: list of instances, each with a ``dc_ids`` list of ints.

    Returns:
        torch.Tensor of shape (batch, max_edu_num), dtype int64, where
        positions beyond an instance's own ``dc_ids`` are padded with -1.
    """
    batch_size = len(onebatch)
    max_edus = max(len(instance.dc_ids) for instance in onebatch)

    # np.long was removed in NumPy 1.24; int64 matches torch's "long" dtype.
    batch_DC_ids = np.full((batch_size, max_edus), -1, dtype=np.int64)

    for idx, instance in enumerate(onebatch):
        batch_DC_ids[idx, :len(instance.dc_ids)] = instance.dc_ids

    return torch.tensor(batch_DC_ids)

def input_variable(onebatch):
    """Pad and stack per-instance chunked token ids into batch tensors.

    Each instance carries ``inputs['input_ids']``: a list of chunks, each a
    list of token ids (optionally with a parallel ``attention_mask``).
    Chunks are zero-padded up to the longest chunk in the batch, and each
    instance is padded with all-zero chunks up to the largest chunk count.

    Args:
        onebatch: non-empty list of instances with an ``inputs`` dict.

    Returns:
        dict with ``input_ids`` (and ``attention_mask`` when present on the
        first instance) as int64 tensors of shape
        (batch, max_chunks, max_tokens), plus ``output_hidden_states=True``
        for a HuggingFace-style model call.
    """
    return_attention_mask = "attention_mask" in onebatch[0].inputs

    batch_size = len(onebatch)
    max_chunk_len = max(len(instance.inputs['input_ids']) for instance in onebatch)
    max_token_len = max(len(chunk)
                        for instance in onebatch
                        for chunk in instance.inputs['input_ids'])

    # np.long was removed in NumPy 1.24; int64 matches torch's "long" dtype.
    chunked_input_ids = np.zeros([batch_size, max_chunk_len, max_token_len], dtype=np.int64)
    if return_attention_mask:
        chunked_attention_mask = np.zeros([batch_size, max_chunk_len, max_token_len], dtype=np.int64)

    for idx, instance in enumerate(onebatch):
        for idy, chunk_token_ids in enumerate(instance.inputs['input_ids']):
            n_tok = len(chunk_token_ids)
            # slice assignment replaces the per-token Python loop
            chunked_input_ids[idx, idy, :n_tok] = chunk_token_ids
            if return_attention_mask:
                chunked_attention_mask[idx, idy, :n_tok] = \
                    instance.inputs['attention_mask'][idy][:n_tok]

    doc_inputs = {
        "input_ids": torch.from_numpy(chunked_input_ids),
        "output_hidden_states": True,
    }
    if return_attention_mask:
        doc_inputs["attention_mask"] = torch.from_numpy(chunked_attention_mask)

    return doc_inputs

def offset_variable(onebatch):
    """Compute the starting token offset of each EDU within its document.

    Offsets are the running sum of the preceding EDU lengths from each
    instance's ``edu_lens``.

    Args:
        onebatch: non-empty list of instances with an ``edu_lens`` list.

    Returns:
        torch.Tensor of shape (batch, max_edu_num), dtype int64; slots
        beyond an instance's own EDU count are left as 0.
    """
    batch_size = len(onebatch)
    max_edus = max(len(instance.edu_lens) for instance in onebatch)

    # np.long was removed in NumPy 1.24; int64 matches torch's "long" dtype.
    edu_offset = np.zeros([batch_size, max_edus], dtype=np.int64)
    for idx, instance in enumerate(onebatch):
        offset = 0
        for idy, edu_len in enumerate(instance.edu_lens):
            edu_offset[idx, idy] = offset
            offset += edu_len

    return torch.tensor(edu_offset)


def batch_doc2edu_variable(onebatch, vocab, config, token_helper):
    """Build token-index and averaging tensors mapping document tokens to EDUs.

    Each instance's EDU texts are re-tokenized with *token_helper*, and every
    EDU is assigned the contiguous range of token positions it occupies in the
    concatenated document.

    Note: *vocab* and *config* are currently unused; they are kept so the
    signature stays compatible with existing call sites.

    Args:
        onebatch: non-empty list of instances with an ``EDUs`` list of
            token lists.
        token_helper: object whose ``batch_text2tokens(texts)`` returns one
            token list per input text.

    Returns:
        EDU_offset_index: int64 tensor (batch, max_edu_num, max_edu_tok_len)
            with the token positions of each EDU, 0-padded.
        batch_denominator: float32 tensor of the same shape with 1/len(EDU)
            at valid positions (for mean-pooling token vectors), 0 elsewhere.
        edu_lengths: list with the EDU count of every instance.
    """
    batch_EDU_index_list = []
    for instance in onebatch:
        EDU_texts = [" ".join(EDU) for EDU in instance.EDUs]
        EDU_tokens_list = token_helper.batch_text2tokens(EDU_texts)

        EDU_index_list = []
        start = 0
        for EDU_tokens in EDU_tokens_list:
            end = start + len(EDU_tokens)
            EDU_index_list.append(list(range(start, end)))
            start = end
        batch_EDU_index_list.append(EDU_index_list)

    batch_size = len(onebatch)
    edu_lengths = [len(instance.EDUs) for instance in onebatch]
    max_edu_num = max(edu_lengths)
    max_EDU_tok_len = max(len(index_list)
                          for EDU_index_list in batch_EDU_index_list
                          for index_list in EDU_index_list)

    # np.long was removed in NumPy 1.24; int64 matches torch's "long" dtype.
    EDU_offset_index = np.zeros([batch_size, max_edu_num, max_EDU_tok_len], dtype=np.int64)
    batch_denominator = np.zeros([batch_size, max_edu_num, max_EDU_tok_len], dtype=np.float32)
    for idx, EDU_index_list in enumerate(batch_EDU_index_list):
        for idy, index_list in enumerate(EDU_index_list):
            n = len(index_list)
            if n:  # guard: an empty EDU contributes nothing (avoids 1/0)
                EDU_offset_index[idx, idy, :n] = index_list
                batch_denominator[idx, idy, :n] = 1.0 / n

    EDU_offset_index = torch.tensor(EDU_offset_index)
    batch_denominator = torch.tensor(batch_denominator)
    return EDU_offset_index, batch_denominator, edu_lengths