import random

import numpy as np
import torch

from basic.SlideWindow import *

def batch_slice(data, batch_size):
    """Yield successive batches of ``batch_size`` items from ``data``.

    The final batch may be shorter when ``len(data)`` is not a multiple
    of ``batch_size``.  Yields nothing for empty ``data``.
    """
    batch_num = int(np.ceil(len(data) / float(batch_size)))
    for i in range(batch_num):
        # Slicing clips the last (possibly short) batch automatically,
        # replacing the original per-element gather loop.
        yield data[i * batch_size:(i + 1) * batch_size]

def data_iter(data, batch_size, shuffle=True):
    """Yield batches of ``batch_size`` instances from ``data``.

    When ``shuffle`` is True, ``data`` is permuted IN PLACE before
    partitioning, and the order of the resulting batches is shuffled as
    well.  NOTE(review): the original docstring claimed the data is
    sorted by source length, but no sorting is performed anywhere here.
    """
    if shuffle:
        np.random.shuffle(data)
    batched_data = list(batch_slice(data, batch_size))
    if shuffle:
        np.random.shuffle(batched_data)
    yield from batched_data

def token2id(instances, tokenizer, vocab):
    """Attach tokenizer encodings to every instance, in place.

    Sets ``instance.s_inputs`` / ``instance.t_inputs`` to the
    ``encode_plus`` output for the source / target sentence.  ``vocab``
    is accepted for interface compatibility but is not used here.
    """
    for instance in instances:
        instance.s_inputs = tokenizer.encode_plus(
            instance.s_sentence, add_special_tokens=True)
        instance.t_inputs = tokenizer.encode_plus(
            instance.t_sentence, add_special_tokens=True)

def filter(instances, max_instance, max_seq_len):
    """Select at most ``max_instance`` instances whose source and target
    token sequences both fit within ``max_seq_len`` tokens.

    Instances are taken in order; over-long ones are skipped without
    counting against the limit.
    """
    kept = []
    for instance in instances:
        if len(kept) >= max_instance:
            break
        s_len = len(instance.s_inputs['input_ids'])
        t_len = len(instance.t_inputs['input_ids'])
        if s_len <= max_seq_len and t_len <= max_seq_len:
            kept.append(instance)
    return kept

def cl_sample(instances, negative):
    """Populate each instance's negative-sample lists, in place.

    For every anchor instance, draws random *other* instances (with
    replacement, so duplicates are possible) until ``neg_t_sentences``
    and ``neg_s_sentences`` each hold ``negative`` encoded inputs
    (``t_inputs`` / ``s_inputs`` of the sampled instances).

    Raises:
        ValueError: if ``negative > 0`` and fewer than two instances are
            given — there would be no other instance to sample, and the
            original rejection loop would spin forever in that case.
    """
    inst_len = len(instances)
    if negative > 0 and inst_len < 2:
        raise ValueError(
            "cl_sample needs at least 2 instances to draw negatives")

    def _fill(bucket, attr, self_idx):
        # Rejection-sample indices until `negative` items are collected,
        # never picking the anchor instance itself.
        while len(bucket) < negative:
            rand_index = random.randint(0, inst_len - 1)
            if rand_index != self_idx:
                bucket.append(getattr(instances[rand_index], attr))

    for idx, inst in enumerate(instances):
        _fill(inst.neg_t_sentences, 't_inputs', idx)
        _fill(inst.neg_s_sentences, 's_inputs', idx)


def text_variable(b_chunked_inputs, b_chunked_att_masks):
    """Pad a batch of chunked token ids / attention masks into tensors.

    Args:
        b_chunked_inputs: per-example list of chunks, each chunk a list
            of token ids (ragged in both chunk count and chunk length).
        b_chunked_att_masks: parallel structure of attention masks;
            assumes each mask has the same length as its chunk — TODO
            confirm with the chunking code.

    Returns:
        Two int64 tensors of shape ``(batch, max_chunk_len,
        max_token_len)``, zero-padded on the right.
    """
    batch_size = len(b_chunked_inputs)
    max_chunk_len = max(len(chunks) for chunks in b_chunked_inputs)
    max_token_len = max(
        len(chunk) for chunks in b_chunked_inputs for chunk in chunks)

    # np.long was removed in NumPy 1.24; int64 matches torch.LongTensor.
    chunked_input_ids = np.zeros(
        [batch_size, max_chunk_len, max_token_len], dtype=np.int64)
    chunked_attention_mask = np.zeros(
        [batch_size, max_chunk_len, max_token_len], dtype=np.int64)

    for idx, chunks in enumerate(b_chunked_inputs):
        masks = b_chunked_att_masks[idx]
        for idy, chunk in enumerate(chunks):
            # Slice assignment copies each ragged chunk into the left
            # edge of the padded row (replaces the per-token loop).
            chunked_input_ids[idx, idy, :len(chunk)] = chunk
            chunked_attention_mask[idx, idy, :len(chunk)] = masks[idy]

    return (torch.from_numpy(chunked_input_ids),
            torch.from_numpy(chunked_attention_mask))

def index2tensor(b_indexs):
    """Pad a ragged batch of index lists into an int64 tensor.

    Args:
        b_indexs: non-empty list of lists of integer indices.

    Returns:
        A ``(batch, max_len)`` tensor, zero-padded on the right.
    """
    b = len(b_indexs)
    t = max(len(indexs) for indexs in b_indexs)
    # np.long was removed in NumPy 1.24; int64 matches torch.LongTensor.
    padded = np.zeros([b, t], dtype=np.int64)
    for row, indexs in enumerate(b_indexs):
        # Slice assignment fills the row prefix; the rest stays zero.
        padded[row, :len(indexs)] = indexs
    return torch.tensor(padded)

def input_variable(onebatch):
    """Build padded id / attention-mask tensors for a batch of instances.

    Reads ``s_inputs`` / ``t_inputs`` (as produced by ``token2id``) from
    each instance and returns
    ``(s_input_ids, s_att_mask, t_input_ids, t_att_mask)``.
    """
    s_ids, t_ids, s_masks, t_masks = [], [], [], []
    for instance in onebatch:
        s_ids.append(instance.s_inputs['input_ids'])
        t_ids.append(instance.t_inputs['input_ids'])
        s_masks.append(instance.s_inputs['attention_mask'])
        t_masks.append(instance.t_inputs['attention_mask'])
    return (index2tensor(s_ids), index2tensor(s_masks),
            index2tensor(t_ids), index2tensor(t_masks))

def constrast_variable(onebatch):
    """Build padded negative-sample tensors, grouped per anchor instance.

    Returns ``(neg_s_input_ids, neg_s_att_mask, neg_t_input_ids,
    neg_t_att_mask)``, each shaped ``(batch, n_negatives, seq_len)``.
    Assumes every instance carries the same number of negatives —
    otherwise the ``view`` below cannot regroup evenly.
    """
    batch = len(onebatch)

    def _gather(side, field):
        # Flatten all negatives of one side/field across the batch into
        # a single padded 2-D tensor.
        rows = [neg[field]
                for instance in onebatch
                for neg in getattr(instance, side)]
        return index2tensor(rows)

    neg_t_input_ids = _gather('neg_t_sentences', 'input_ids')
    neg_t_att_mask = _gather('neg_t_sentences', 'attention_mask')
    neg_s_input_ids = _gather('neg_s_sentences', 'input_ids')
    neg_s_att_mask = _gather('neg_s_sentences', 'attention_mask')

    # Regroup the flat (batch * negatives) rows back per anchor instance.
    t_len = neg_t_input_ids.size(1)
    neg_t_input_ids = neg_t_input_ids.view(batch, -1, t_len)
    neg_t_att_mask = neg_t_att_mask.view(batch, -1, t_len)

    s_len = neg_s_input_ids.size(1)
    neg_s_input_ids = neg_s_input_ids.view(batch, -1, s_len)
    neg_s_att_mask = neg_s_att_mask.view(batch, -1, s_len)

    return neg_s_input_ids, neg_s_att_mask, neg_t_input_ids, neg_t_att_mask
