import os
import json
from tqdm import tqdm
import gzip
import random
from copy import deepcopy

from utils import print_rank_0
from pprint import pprint
import numpy as np

import torch
from torch.utils.data import Dataset

from transformers import LlamaTokenizer

from datasets import load_dataset
from utils import read_json_or_jsonl_data
from utils import DEFAULT_PAD_TOKEN, DEFAULT_BOS_TOKEN, DEFAULT_EOS_TOKEN, DEFAULT_UNK_TOKEN
from utils import QUERY_PROMPT, SEP_TOKEN, STRING_SEP, IGNORE_INDEX


class TextLlmDataset(Dataset):
    """Thin ``Dataset`` wrapper over an in-memory list of examples.

    Items are returned untouched; tokenization/padding happens later in the
    collator, not here.
    """

    def __init__(self, data):
        # Keep a reference to the raw example list.
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]
    

def llm_data_collactor(args, batch, tokenizer):
    """Collate a batch of raw items into padded, model-ready tensors.

    Each raw item is tokenized via ``prepare_data_item`` and the resulting id
    sequences are padded to a common length by ``batch_padding``.

    Args:
        args: run configuration, forwarded to ``prepare_data_item``.
        batch: list of raw data items (dicts with 'query' / 'target').
        tokenizer: tokenizer used for encoding.

    Returns:
        dict with LongTensor 'input_ids', FloatTensor 'attention_mask' and
        LongTensor 'labels', each of shape (batch, max_len).
    """
    input_ids, labels = [], []
    for raw_item in batch:
        item = prepare_data_item(args, raw_item, tokenizer=tokenizer)
        input_ids.append(item['input_ids'])
        # item['attention_mask'] is intentionally dropped: batch_padding
        # rebuilds the mask from the padded/truncated input_ids.
        labels.append(item['labels'])
    outputs = batch_padding(input_ids, labels, tokenizer)
    return {
        "input_ids": torch.Tensor(outputs['input_ids']).long(),
        "attention_mask": torch.Tensor(outputs['attention_mask']).float(),
        "labels": torch.Tensor(outputs['labels']).long()
    }


def llm_tokenize(sentences, tokenizer, padding="longest", add_sep_token=False, only_query=False):
    """Encode one or more sentences into padded token-id batches.

    Args:
        sentences: a single string or list of strings. Query/response pairs
            are joined by ``SEP_TOKEN`` inside each string.
        tokenizer: tokenizer providing ``encode`` and the special-token ids.
        padding: forwarded to ``batch_padding`` ('longest' or fixed-length).
        add_sep_token: if True, emit ``<bos> query <sep> response <eos>``;
            otherwise the sep token is dropped from the id sequence.
        only_query: if True, emit a generation prompt ``<bos> query <sep>``
            with no response and no eos.

    Returns:
        dict from ``batch_padding`` with 'input_ids', 'attention_mask'
        and 'labels'.
    """
    if isinstance(sentences, str):
        sentences = [sentences]
    input_ids = []
    for sent in sentences:
        if only_query:
            # Generation prompt: no response part, no eos.
            query_ids = tokenizer.encode(sent, add_special_tokens=False)
            input_ids.append(
                [tokenizer.bos_token_id] + query_ids + [tokenizer.sep_token_id]
            )
        elif add_sep_token:
            query, response = sent.split(SEP_TOKEN)
            query_ids = tokenizer.encode(query, add_special_tokens=False)
            response_ids = tokenizer.encode(response, add_special_tokens=False)
            input_ids.append(
                [tokenizer.bos_token_id] + query_ids + [tokenizer.sep_token_id] + response_ids + [tokenizer.eos_token_id]
            )
        elif SEP_TOKEN in sent:
            # Same as above but the sep token itself is omitted.
            query, response = sent.split(SEP_TOKEN)
            query_ids = tokenizer.encode(query, add_special_tokens=False)
            response_ids = tokenizer.encode(response, add_special_tokens=False)
            input_ids.append(
                [tokenizer.bos_token_id] + query_ids + response_ids + [tokenizer.eos_token_id]
            )
        else:
            input_ids.append(
                [tokenizer.bos_token_id] + tokenizer.encode(sent, add_special_tokens=False) + [tokenizer.eos_token_id]
            )

    # BUG FIX: the original call was batch_padding(input_ids, tokenizer,
    # padding=padding), which bound the tokenizer to the `lables` parameter
    # and the padding string to `tokenizer` — a guaranteed crash. Labels are
    # set to a copy of input_ids (standard causal-LM target).
    # NOTE(review): confirm callers do not expect query tokens masked here.
    return batch_padding(input_ids, deepcopy(input_ids), tokenizer, padding=padding)


def batch_padding(input_ids, lables, tokenizer, padding='longest'):
    """Pad (or truncate) id sequences and label sequences to uniform length.

    Args:
        input_ids: list of lists of token ids.
        lables: list of lists of label ids (name kept as-is — sic — for
            backward compatibility with existing keyword callers).
        tokenizer: supplies ``model_max_length``, ``pad_token_id``,
            ``truncation_side`` and ``padding_side``.
        padding: 'longest' pads to the batch maximum (capped at
            ``model_max_length``); any other value pads everything to
            ``model_max_length``.

    Returns:
        dict with padded 'input_ids', 'attention_mask' (1 = real token,
        0 = padding) and 'labels'.
    """
    if padding == 'longest':
        max_input_length = max(len(inp_ids) for inp_ids in input_ids)
        max_length = min(tokenizer.model_max_length, max_input_length)
        max_label_length = max(len(label) for label in lables)
        max_label_length = min(tokenizer.model_max_length, max_label_length)
    else:
        max_length = tokenizer.model_max_length
        # BUG FIX: max_label_length was never assigned on this branch,
        # causing a NameError in the label loop below.
        max_label_length = tokenizer.model_max_length

    outputs = {"input_ids": [], "attention_mask": [], 'labels': []}
    for inp_ids in input_ids:
        attn_mask = [1] * len(inp_ids)
        if len(inp_ids) >= max_length:
            if tokenizer.truncation_side == 'left':
                inp_ids = inp_ids[-max_length:]
                attn_mask = attn_mask[-max_length:]
            else:
                inp_ids = inp_ids[:max_length]
                attn_mask = attn_mask[:max_length]
        else:
            if tokenizer.padding_side == 'left':
                inp_ids = [tokenizer.pad_token_id] * (max_length - len(inp_ids)) + inp_ids
                attn_mask = [0] * (max_length - len(attn_mask)) + attn_mask
            else:
                inp_ids = inp_ids + [tokenizer.pad_token_id] * (max_length - len(inp_ids))
                attn_mask = attn_mask + [0] * (max_length - len(attn_mask))

        outputs['input_ids'].append(deepcopy(inp_ids))
        outputs['attention_mask'].append(deepcopy(attn_mask))

    for label in lables:
        # NOTE(review): labels are padded with pad_token_id rather than
        # IGNORE_INDEX, so loss will be computed on padding positions unless
        # masked downstream — confirm this is intended.
        if len(label) >= max_label_length:
            if tokenizer.truncation_side == 'left':
                label = label[-max_label_length:]
            else:
                label = label[:max_label_length]
        else:
            if tokenizer.padding_side == 'left':
                label = [tokenizer.pad_token_id] * (max_label_length - len(label)) + label
            else:
                label = label + [tokenizer.pad_token_id] * (max_label_length - len(label))
        outputs['labels'].append(deepcopy(label))
    return outputs

def _tokenize_fn(args, string, tokenizer):
    """Tokenize a list of strings."""
    tokenized = tokenizer(
            string,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        )
    input_ids = tokenized.input_ids[0]
    attention_mask = tokenized.attention_mask[0]
    # input_ids_lens = sum([input_id != tokenizer.pad_token_id for input_id in tokenized['input_ids'][0]])
    input_ids_lens = tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() 
    return dict(
        input_ids=input_ids.numpy().tolist(),
        input_ids_lens=input_ids_lens,
        attention_mask=attention_mask.numpy().tolist()
    )


def prepare_data_item(args, item, tokenizer=None):
    """Tokenize one {'query', 'target'} item for supervised fine-tuning.

    The concatenated query+target string is tokenized, labels are copied from
    the input ids, and the label positions covering the query prefix are
    masked to IGNORE_INDEX so loss is only computed on the target.

    Args:
        args: run configuration, forwarded to ``_tokenize_fn``.
        item: dict with string fields 'query' and 'target'.
        tokenizer: required; kept as a keyword for caller compatibility.

    Returns:
        dict with 'input_ids', 'attention_mask' and 'labels'.

    Raises:
        ValueError: if the tokenizer is missing or tokenization fails.
    """
    if tokenizer is None:
        # BUG FIX: the original fell through to the return statement and
        # crashed on unbound locals; fail with a clear message instead.
        raise ValueError("prepare_data_item requires a tokenizer")
    new_item = deepcopy(item)
    try:
        full_text = new_item['query'] + new_item['target']
        query_text = new_item['query']
        example_tokens, source_tokens = [
            _tokenize_fn(args, text, tokenizer)
            for text in [full_text, query_text]
        ]
        input_ids = example_tokens['input_ids']
        attention_mask = example_tokens['attention_mask']
        labels = deepcopy(input_ids)
        # Mask the query prefix so loss ignores it.
        # NOTE(review): assumes the query token count aligns with the prefix
        # of the concatenated encoding (right truncation) — confirm tokenizer
        # config.
        for i in range(source_tokens["input_ids_lens"]):
            labels[i] = IGNORE_INDEX
    except Exception as e:
        # BUG FIX: was a bare `except:` (also caught KeyboardInterrupt /
        # SystemExit and discarded the traceback); narrowed and chained.
        raise ValueError(f"get tokenization error with {new_item}") from e
    return dict(
        input_ids=input_ids,
        attention_mask=attention_mask,
        labels=labels
    )


def load_text_dataset(args, data_path):
    """Load (query, target, query_id) records from a json/jsonl file.

    Args:
        args: unused here; kept for signature compatibility with callers.
        data_path: path passed to ``read_json_or_jsonl_data``.

    Returns:
        list of dicts with keys 'query', 'target' and 'query_id'. The query
        is read from 'query' when present, else from 'instruction'
        (alpaca-style records).
    """
    print_rank_0("loading text dataset from: \n   {}".format(data_path))
    data_list = read_json_or_jsonl_data(data_path)
    outputs = []
    for item in data_list:
        if 'query' in item:
            query = str(item['query'])
        else:
            query = str(item['instruction'])
        outputs.append({
            "query": query,
            "target": item['target'],
            "query_id": item['query_id']
        })
    if outputs:
        # BUG FIX: unguarded `print(outputs[0])` crashed on an empty dataset
        # and printed on every rank; use print_rank_0 like the rest.
        print_rank_0(str(outputs[0]))
    print_rank_0("finished loading with {} data.".format(len(outputs)))
    return outputs
        
    
