"""
data utils
"""
import numpy as np
import tiktoken
import torch
from path import *


def preprocessing_raw_data(data_name, encoding_method, train_val_split=0.9, preprocessed_data_name=''):
    """Tokenize a raw text dataset and write train/val splits as uint16 .bin files.

    Args:
        data_name: key passed to get_raw_data_path to locate the raw text file.
        encoding_method: 'character' for a char-level vocabulary built from the
            text itself, or 'gpt2' for the tiktoken GPT-2 BPE tokenizer.
        train_val_split: fraction of the characters assigned to the train split.
        preprocessed_data_name: optional prefix for the output files; empty
            string writes plain 'train.bin' / 'test.bin'.

    Raises:
        NotImplementedError: if encoding_method is not recognized.
    """
    raw_data_path = get_raw_data_path(data_name)
    # Explicit utf-8 so reading does not depend on the platform default
    # encoding (e.g. cp1252 on Windows would fail on non-ASCII text).
    with open(raw_data_path, 'r', encoding='utf-8') as f:
        raw_data = f.read()
    print(f"length of dataset in characters: {len(raw_data):,}")

    # get all the unique characters that occur in this text
    chars = sorted(set(raw_data))
    vocab_size = len(chars)
    print("all the unique characters:", ''.join(chars))
    print(f"vocab size: {vocab_size:,}")

    # create the train and test splits
    n = len(raw_data)
    train_data = raw_data[:int(n * train_val_split)]
    val_data = raw_data[int(n * train_val_split):]

    # NOTE: the validation split is written with a 'test.bin' suffix;
    # get_batch reads back the same names, so the two must stay in sync.
    if preprocessed_data_name:
        trainset_name = f'{preprocessed_data_name}_train.bin'
        valset_name = f'{preprocessed_data_name}_test.bin'
    else:
        trainset_name = 'train.bin'
        valset_name = 'test.bin'

    if encoding_method == 'character':
        # create a mapping from characters to integers and encode directly;
        # the inverse (decode) mapping is not needed during preprocessing
        stoi = {ch: i for i, ch in enumerate(chars)}
        train_ids = [stoi[c] for c in train_data]
        val_ids = [stoi[c] for c in val_data]
    elif encoding_method == 'gpt2':
        # encode with tiktoken gpt2 bpe
        enc = tiktoken.get_encoding("gpt2")
        train_ids = enc.encode_ordinary(train_data)
        val_ids = enc.encode_ordinary(val_data)
    else:
        raise NotImplementedError(f"unknown encoding_method: {encoding_method!r}")

    print(f"train has {len(train_ids):,} tokens")
    print(f"val has {len(val_ids):,} tokens")

    # uint16 suffices for both char-level vocabularies and the GPT-2 BPE
    # vocabulary (50257 < 65536)
    train_ids = np.array(train_ids, dtype=np.uint16)
    val_ids = np.array(val_ids, dtype=np.uint16)
    train_ids.tofile(get_preprocessed_data_path(trainset_name))
    val_ids.tofile(get_preprocessed_data_path(valset_name))


def get_batch(block_size, batch_size, split, data_name='', device='cpu'):
    """Sample a random batch of (input, target) token sequences from disk.

    Returns tensors x, y of shape (batch_size, block_size), where y is x
    shifted one position to the right, both placed on `device`.
    """
    # Recreate the np.memmap on every call to avoid a memory leak, as per
    # https://stackoverflow.com/questions/45132940/numpy-memmap-memory-usage-want-to-iterate-once/61472122#61472122
    suffix = 'train.bin' if split == 'train' else 'test.bin'
    filename = f'{data_name}_{suffix}' if data_name else suffix
    tokens = np.memmap(get_preprocessed_data_path(filename), dtype=np.uint16, mode='r')

    # random starting offsets; each window of block_size+1 tokens yields one
    # (input, target) pair
    offsets = torch.randint(len(tokens) - block_size, (batch_size,))
    x = torch.stack([torch.from_numpy(tokens[o:o + block_size].astype(np.int64)) for o in offsets])
    y = torch.stack([torch.from_numpy(tokens[o + 1:o + 1 + block_size].astype(np.int64)) for o in offsets])

    # load data into device
    if 'cuda' in device:
        # pinned host memory lets the host->GPU copy run asynchronously
        x = x.pin_memory().to(device, non_blocking=True)
        y = y.pin_memory().to(device, non_blocking=True)
    else:
        x = x.to(device)
        y = y.to(device)
    return x, y

