import logging
import os
import pickle
import time
import numpy as np
from tqdm import tqdm
import torch
from filelock import FileLock
from torch.utils.data.dataset import Dataset
import random

from ...tokenization_utils import PreTrainedTokenizer
from ...training_args import TrainingArguments
from ...modeling_utils import PreTrainedModel

def set_seed(seed: int):
    """Seed every RNG in use (Python ``random``, NumPy, torch CPU and all
    CUDA devices) so that runs are reproducible."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,  # safe no-op when CUDA is unavailable
    )
    for seeder in seeders:
        seeder(seed)


# Module-level logger named after this module, per the standard `logging` convention.
logger = logging.getLogger(__name__)

class LMRecorder:
    """Wraps a pretrained language model and runs forward passes to record
    its output logits."""

    # Wrapped model. The annotation is quoted so the project-local name is
    # not evaluated at class-creation time.
    model: "PreTrainedModel"

    def __init__(self, model: "PreTrainedModel", args: "TrainingArguments"):
        """Move ``model`` to the configured device and fix all RNG seeds.

        Args:
            model: language model whose predictions will be recorded.
            args: training arguments; only ``device``, ``n_gpu`` and
                ``seed`` are read by this class.
        """
        self.model = model.to(args.device)
        self.args = args

        set_seed(self.args.seed)

    def predict(self, mask_batch):
        """Run one forward pass and return the logits on CPU.

        Args:
            mask_batch: dict of model inputs (name -> tensor). The caller's
                dict is NOT modified; tensors are moved to ``args.device``
                in a local copy.

        Returns:
            The first element of the model output (logits), detached and
            moved to CPU.
        """
        model = self.model
        # Wrap for data-parallel execution only when several GPUs are
        # configured (the original had a redundant else re-assignment).
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)

        # BUGFIX: build a shallow copy instead of mutating the caller's
        # dict — the original moved tensors and injected "return_tuple"
        # into `mask_batch` in place.
        batch = {k: v.to(self.args.device) for k, v in mask_batch.items()}
        batch["return_tuple"] = True

        with torch.no_grad():
            outputs = model(**batch)
            logits = outputs[0]

        return logits.detach().cpu()

class TextDataset(Dataset):
    """Dataset of fixed-size token blocks built from one plain-text file.

    The tokenized file is cut into contiguous blocks of ``block_size``
    tokens (the trailing remainder is dropped) and the encoded result is
    pickled next to the source file for reuse.

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(
        self, tokenizer: "PreTrainedTokenizer", file_path: str, block_size: int, overwrite_cache=False,
    ):
        """Tokenize ``file_path`` into blocks, using or refreshing the cache.

        Args:
            tokenizer: tokenizer used to split and encode the raw text.
            file_path: path to an existing plain-text file.
            block_size: total block length *including* special tokens.
            overwrite_cache: when True, re-tokenize even if a cache exists.
        """
        assert os.path.isfile(file_path), f"Input file path {file_path} not found"

        # Reserve room for the special tokens added around every block.
        block_size = block_size - tokenizer.num_special_tokens_to_add(pair=False)

        directory, filename = os.path.split(file_path)
        cached_features_file = os.path.join(
            directory, "cached_lm_{}_{}_{}".format(tokenizer.__class__.__name__, str(block_size), filename,),
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):

            if os.path.exists(cached_features_file) and not overwrite_cache:
                start = time.time()
                with open(cached_features_file, "rb") as handle:
                    self.examples = pickle.load(handle)
                # Lazy %-formatting throughout (the original mixed an
                # f-string with a trailing %-argument here).
                logger.info(
                    "Loading features from cached file %s [took %.3f s]", cached_features_file, time.time() - start
                )

            else:
                logger.info("Creating features from dataset file at %s", directory)

                self.examples = []
                with open(file_path, encoding="utf-8") as f:
                    text = f.read()

                tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))

                for i in range(0, len(tokenized_text) - block_size + 1, block_size):  # Truncate in block of block_size
                    self.examples.append(
                        tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size])
                    )
                # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
                # If your dataset is small, first you should look for a bigger one :-) and second you
                # can change this behavior by adding (model specific) padding.

                start = time.time()
                with open(cached_features_file, "wb") as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info(
                    "Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
                )

    def __len__(self):
        """Number of token blocks."""
        return len(self.examples)

    def __getitem__(self, i) -> torch.Tensor:
        """Return block ``i`` as a LongTensor."""
        return torch.tensor(self.examples[i], dtype=torch.long)


class LineByLineTextDataset(Dataset):
    """Dataset with one tokenized example per non-blank line of a text file.

    The encoded ``input_ids`` are pickled to ``<cache_dir>/cached_select_lm``
    and reloaded on subsequent runs.

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: "PreTrainedTokenizer", file_path: str, block_size: int, cache_dir=None):
        """Tokenize ``file_path`` line by line, caching the result on disk.

        Args:
            tokenizer: callable tokenizer returning a ``"input_ids"`` mapping.
            file_path: path to the input text file.
            block_size: maximum sequence length passed to the tokenizer.
            cache_dir: directory for the cache file; defaults to the input
                file's own directory.
        """
        cached_features_file = os.path.join(
            # BUGFIX: the original fallback evaluated to None either way,
            # so the default cache_dir=None made os.path.join raise
            # TypeError. Fall back to the input file's directory instead.
            cache_dir if cache_dir is not None else os.path.dirname(file_path),
            "cached_{}".format("select_lm"),
        )

        if os.path.exists(cached_features_file):
            start = time.time()
            with open(cached_features_file, "rb") as f:
                self.examples = pickle.load(f)
            # Lazy %-formatting (the original mixed an f-string with a
            # trailing %-argument).
            logger.info(
                "Loading features from cached file %s [took %.3f s]", cached_features_file, time.time() - start
            )
        else:
            assert os.path.isfile(file_path), f"Input file path {file_path} not found"
            logger.info("Creating features from dataset file at %s", file_path)

            with open(file_path, encoding="utf-8") as f:
                # Keep only lines with visible content.
                lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]

            batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
            self.examples = batch_encoding["input_ids"]

            start = time.time()
            with open(cached_features_file, "wb") as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
            logger.info(
                "Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
            )

    def __len__(self):
        """Number of encoded lines."""
        return len(self.examples)

    def __getitem__(self, i) -> torch.Tensor:
        """Return example ``i`` as a LongTensor."""
        return torch.tensor(self.examples[i], dtype=torch.long)



class MixDataset(Dataset):
    """Dataset of (text, label) pairs read from a tab-separated file.

    Each input line must be ``text<TAB>label``; blank or malformed lines
    are skipped. Encoded features and labels are pickled to
    ``<cache_dir>/cached_mix_lm`` and reloaded on subsequent runs.

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: "PreTrainedTokenizer", file_path: str, block_size: int, cache_dir=None):
        """Read and tokenize the TSV file, caching the result on disk.

        Args:
            tokenizer: callable tokenizer returning a ``"input_ids"`` mapping.
            file_path: path to the tab-separated input file.
            block_size: maximum sequence length passed to the tokenizer.
            cache_dir: directory for the cache file; defaults to the input
                file's own directory.
        """
        cached_features_file = os.path.join(
            # BUGFIX: the original fallback evaluated to None either way,
            # so the default cache_dir=None made os.path.join raise
            # TypeError. Fall back to the input file's directory instead.
            cache_dir if cache_dir is not None else os.path.dirname(file_path),
            "cached_{}".format("mix_lm"),
        )

        if os.path.exists(cached_features_file):
            start = time.time()
            with open(cached_features_file, "rb") as f:
                dump_data = pickle.load(f)
                self.examples = dump_data[0]
                self.labels = dump_data[1]
            # Lazy %-formatting (the original mixed an f-string with a
            # trailing %-argument).
            logger.info(
                "Loading features from cached file %s [took %.3f s]", cached_features_file, time.time() - start
            )
        else:
            assert os.path.isfile(file_path), f"Input file path {file_path} not found"
            logger.info("Creating features from dataset file at %s", file_path)

            lines = []
            labels = []
            with open(file_path, "r", encoding="utf-8") as f:
                for raw_line in tqdm(f.readlines()):
                    # A blank line strips to "" and splits to one field, so
                    # the length check below also discards it.
                    fields = raw_line.strip().split("\t")
                    if len(fields) != 2:
                        continue
                    lines.append(fields[0])
                    labels.append(fields[1])

            batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
            self.examples = batch_encoding["input_ids"]
            self.labels = labels

            start = time.time()
            with open(cached_features_file, "wb") as handle:
                dump_data = [self.examples, self.labels]
                pickle.dump(dump_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
            logger.info(
                "Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
            )

    def __len__(self):
        """Number of (text, label) examples."""
        return len(self.examples)

    def __getitem__(self, i) -> dict:
        """Return example ``i`` as ``{"input_ids": LongTensor, "labels": LongTensor}``."""
        item_dict = {
            "input_ids": torch.tensor(self.examples[i], dtype=torch.long),
            "labels": torch.tensor(int(self.labels[i]), dtype=torch.long),
        }
        return item_dict

class DomainEmbeddingDataset(Dataset):
    """Dataset of (text, domain-label) pairs read from a tab-separated file.

    Each input line must be ``text<TAB>label``; blank or malformed lines
    are skipped. Encoded features and labels are pickled to
    ``<cache_dir>/cached_domain_embedding_lm`` and reloaded later.

    This will be superseded by a framework-agnostic approach soon.
    """

    def __init__(self, tokenizer: "PreTrainedTokenizer", file_path: str, block_size: int, cache_dir=None):
        """Read and tokenize the TSV file, caching the result on disk.

        Args:
            tokenizer: callable tokenizer returning a ``"input_ids"`` mapping.
            file_path: path to the tab-separated input file.
            block_size: maximum sequence length passed to the tokenizer.
            cache_dir: directory for the cache file; defaults to the input
                file's own directory.
        """
        cached_features_file = os.path.join(
            # BUGFIX: the original fallback evaluated to None either way,
            # so the default cache_dir=None made os.path.join raise
            # TypeError. Fall back to the input file's directory instead.
            cache_dir if cache_dir is not None else os.path.dirname(file_path),
            # BUGFIX: the original reused "mix_lm", colliding with
            # MixDataset's cache file and silently loading its features.
            "cached_{}".format("domain_embedding_lm"),
        )

        if os.path.exists(cached_features_file):
            start = time.time()
            with open(cached_features_file, "rb") as f:
                dump_data = pickle.load(f)
                self.examples = dump_data[0]
                self.labels = dump_data[1]
            # Lazy %-formatting (the original mixed an f-string with a
            # trailing %-argument).
            logger.info(
                "Loading features from cached file %s [took %.3f s]", cached_features_file, time.time() - start
            )
        else:
            assert os.path.isfile(file_path), f"Input file path {file_path} not found"
            logger.info("Creating features from dataset file at %s", file_path)

            lines = []
            labels = []
            with open(file_path, "r", encoding="utf-8") as f:
                for raw_line in tqdm(f.readlines()):
                    # A blank line strips to "" and splits to one field, so
                    # the length check below also discards it.
                    fields = raw_line.strip().split("\t")
                    if len(fields) != 2:
                        continue
                    lines.append(fields[0])
                    labels.append(fields[1])

            batch_encoding = tokenizer(lines, add_special_tokens=True, truncation=True, max_length=block_size)
            self.examples = batch_encoding["input_ids"]
            self.labels = labels

            start = time.time()
            with open(cached_features_file, "wb") as handle:
                dump_data = [self.examples, self.labels]
                pickle.dump(dump_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
            logger.info(
                "Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
            )

    def __len__(self):
        """Number of (text, label) examples."""
        return len(self.examples)

    def __getitem__(self, i) -> dict:
        """Return example ``i`` as ``{"input_ids": LongTensor, "labels": LongTensor}``."""
        item_dict = {
            "input_ids": torch.tensor(self.examples[i], dtype=torch.long),
            "labels": torch.tensor(int(self.labels[i]), dtype=torch.long),
        }
        return item_dict
        