import numpy as np
import torch
from torch.autograd import Variable
from basic.Doc import *


def read_sent(inf):
    """Yield sentences from an iterable of lines (e.g. an open file).

    A sentence is a list of stripped, non-empty lines; sentences are
    separated by blank lines.  Runs of consecutive blank lines are
    collapsed: empty sentences are never yielded.  (The previous version
    emitted a spurious ``[]`` for every extra blank line, which crashes
    consumers that index ``sentence[0]`` such as ``read_corpus``.)

    Args:
        inf: iterable of text lines.

    Yields:
        list[str]: one sentence at a time; the final sentence is yielded
        even when the input does not end with a blank line.
    """
    sentence = []
    for line in inf:
        line = line.strip()
        if line:
            sentence.append(line)
        elif sentence:
            # Blank line closes the current sentence; skip if already empty.
            yield sentence
            sentence = []
    if sentence:
        yield sentence

def read_corpus(file_path, eval=False):
    """Read a CoNLL-style file and group its sentences into Doc objects.

    A sentence whose first line starts with ``# newdoc id =`` opens a new
    document; every following sentence is appended to that document until
    the next marker.

    Args:
        file_path: path to a UTF-8 encoded CoNLL file.
        eval: when True, suppress the summary printout.
              (NOTE: the name shadows the builtin ``eval``; kept so
              existing keyword callers keep working.)

    Returns:
        list: Doc objects with ``extract_conll()`` already applied.

    Raises:
        ValueError: if a sentence appears before any ``# newdoc id =``
            marker (previously this raised an opaque NameError because
            ``doc`` was unbound).
    """
    doc_data = []
    doc = None
    with open(file_path, mode='r', encoding='utf8') as inf:
        for sent in read_sent(inf):
            if sent[0].startswith("# newdoc id ="):
                doc = Doc()
                doc.firstline = sent[0]
                # Document name is the text after the first '='.
                doc.name = sent[0].split('=')[1].strip()
                # Remaining lines of the marker sentence belong to the doc.
                doc.sentences_conll.append(sent[1:])
                doc_data.append(doc)
            else:
                if doc is None:
                    raise ValueError(
                        "sentence found before any '# newdoc id =' marker in "
                        + file_path)
                doc.sentences_conll.append(sent)
    sent_num = 0
    for doc in doc_data:
        sent_num += len(doc.sentences_conll)
        doc.extract_conll()
    if not eval:
        print("Info: ", file_path)
        print("Doc num: ", len(doc_data))
        print("Sentence num: ", sent_num)
    return doc_data

def batch_slice(data, batch_size):
    """Yield consecutive batches of at most ``batch_size`` items from ``data``.

    The final batch may be shorter when ``len(data)`` is not a multiple of
    ``batch_size``; an empty ``data`` yields nothing.
    """
    for start in range(0, len(data), batch_size):
        yield data[start:start + batch_size]



def data_iter(data, batch_size, shuffle=True):
    """Yield batches of at most ``batch_size`` items from ``data``.

    When ``shuffle`` is True the input list is permuted IN PLACE via
    ``np.random.shuffle`` before batching, and the resulting batches are
    additionally emitted in a random order.  With ``shuffle=False`` the
    original order is preserved exactly.
    """
    if shuffle:
        np.random.shuffle(data)
    batches = list(batch_slice(data, batch_size))
    if shuffle:
        np.random.shuffle(batches)
    yield from batches

def inst(data):
    """Flatten a list of docs into one Instance per sentence.

    Each doc is expected to expose parallel ``sentences`` and
    ``sentences_labels`` lists; every (sentence, labels) pair becomes a
    fresh Instance carrying both.
    """
    instances = []
    for doc in data:
        for words, labels in zip(doc.sentences, doc.sentences_labels):
            # One label per token is required by the alignment step later.
            assert len(words) == len(labels)
            item = Instance()
            item.sentence = words
            item.labels = labels
            instances.append(item)
    return instances

def word2id(data, tokenizer):
    """Tokenize every instance's sentence and attach the encoding.

    ``instance.sentence`` is a list of words, so the tokenizer is invoked
    with ``is_split_into_words=True``; special tokens are not added.  The
    result is stored on ``instance.tokenized_inputs``.
    """
    for item in data:
        item.tokenized_inputs = tokenizer(
            item.sentence,
            is_split_into_words=True,
            add_special_tokens=False,
        )

def label2id(data, vocab):
    """Convert labels to ids and align them with subword tokens.

    For every instance, ``vocab.label2id`` maps the word-level labels to
    ids.  Those ids are then spread over the subword sequence reported by
    ``tokenized_inputs.word_ids()``: the first subword of each word gets
    the word's label id, continuation subwords get -100 (the value loss
    functions conventionally ignore).  Results are stored on
    ``instance.label_ids`` and ``instance.aligned_label_ids``.
    """
    for instance in data:
        ids = vocab.label2id(instance.labels)
        aligned = []
        previous = -1
        for word_idx in instance.tokenized_inputs.word_ids():
            # New word -> emit its label id; repeated word -> mask out.
            aligned.append(ids[word_idx] if word_idx != previous else -100)
            previous = word_idx
        instance.aligned_label_ids = aligned
        instance.label_ids = ids

def batch_inputs(onebatch):
    """Pad a batch of tokenized instances into model-ready tensors.

    Sequences are right-padded with 0 up to the longest sequence in the
    batch; ``attention_mask`` is 1 on real tokens and 0 on padding.

    Returns:
        dict with ``input_ids`` and ``attention_mask`` LongTensors of
        shape (batch, max_len), plus ``output_hidden_states=True`` so the
        encoder exposes all hidden layers.
    """
    rows = len(onebatch)
    width = max(len(item.tokenized_inputs['input_ids']) for item in onebatch)

    ids = np.zeros([rows, width], dtype=np.longlong)
    mask = np.zeros([rows, width], dtype=np.longlong)

    for row, item in enumerate(onebatch):
        tokens = item.tokenized_inputs['input_ids']
        ids[row, :len(tokens)] = tokens
        mask[row, :len(tokens)] = 1

    return {
        "input_ids": torch.from_numpy(ids),
        "attention_mask": torch.from_numpy(mask),
        "output_hidden_states": True,
    }

def batch_label(onebatch):
    """Pad aligned label ids into a single (batch, max_len) LongTensor.

    Positions past a sequence's end are filled with -100 so they are
    ignored by the loss, matching the masking used in label alignment.
    """
    rows = len(onebatch)
    width = max(len(item.aligned_label_ids) for item in onebatch)

    gold = np.full([rows, width], -100, dtype=np.longlong)
    for row, item in enumerate(onebatch):
        labels = item.aligned_label_ids
        gold[row, :len(labels)] = labels

    return torch.from_numpy(gold)