########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
import datetime
import json, logging
import os

import numpy as np
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from torch import nn

from .trainer import Trainer, TrainerConfig
from .model import GPT, GPTConfig
from .utils import set_seed

from app.admin.models import TModules
from mlwrite import db


class Dataset(Dataset):  # NOTE(review): shadows torch.utils.data.Dataset; renaming would break callers
    """Token-level dataset over a raw text corpus for RWKV training.

    Builds a vocabulary from ``data``, persists it to
    ``<base_path>/vocab/vocab.json`` (utf-16, as downstream tooling expects),
    and records that path on the ``TModules`` row matching ``label_id``.
    Each item is a random ``(x, y)`` pair of ``ctx_len`` token ids where ``y``
    is ``x`` shifted one position ahead (next-token prediction targets).
    """

    def __init__(self, data, model_level, ctx_len, epoch_length_fixed, label_id, base_path):
        """Tokenize ``data``, build/persist the vocab, and store lookup tables.

        data: raw corpus string (split into words when model_level == 'word',
              otherwise treated as a character sequence).
        model_level: 'character' or 'word'.
        ctx_len: context window length of each training sample.
        epoch_length_fixed: number of samples reported per 'epoch' (see __len__).
        label_id: module_code used to locate the TModules DB row to update.
        base_path: directory under which 'vocab/vocab.json' is written.
        """
        vocab_path = base_path + 'vocab/'
        # exist_ok replaces the old try/except-OSError-pass idiom.
        os.makedirs(vocab_path, exist_ok=True)
        print('building token list...', end=' ')
        if model_level == 'word':
            import re
            # Surround punctuation/digits with spaces so each becomes its own token.
            data = re.sub(
                r'(\n|\.|\,|\?|\!|\:|\;|\-|\—|\||\'|\"|\`|\(|\)|[0-9]|\[|\]|\{|\}|\=|\+|\*|\\|\/|\~|\&|\$|\#|\%)',
                r' \g<0> ', data)
            data = re.sub(' +', ' ', data)
            print('splitting token...')
            data = data.lower().split(' ')
        unique = sorted(set(data))
        # id -> token map, persisted so inference can rebuild the vocabulary.
        xxObj = {i: u for i, u in enumerate(unique)}
        with open(vocab_path + 'vocab.json', "w", encoding="utf-16") as vocab_file:
            vocab_file.write(json.dumps(xxObj, ensure_ascii=False))
            # Record the vocab location on the module row so later stages can find it.
            module = TModules.query.filter_by(module_code=label_id).first()
            module.vocab_path = vocab_path + 'vocab.json'
            db.session.commit()

        data_size, vocab_size = len(data), len(unique)
        print('data has %d %ss, %d unique.' % (data_size, model_level, vocab_size))
        self.stoi = {ch: i for i, ch in enumerate(unique)}  # token -> id
        self.itos = {i: ch for i, ch in enumerate(unique)}  # id -> token
        self.ctx_len = ctx_len
        self.vocab_size = vocab_size
        self.data = data
        self.epoch_length_fixed = epoch_length_fixed

    def __len__(self):
        # An 'epoch' is a fixed number of random samples, not a full corpus pass.
        return self.epoch_length_fixed

    def __getitem__(self, idx):
        # idx is deliberately ignored: sample a random window from the corpus.
        i = np.random.randint(0, len(self.data) - (self.ctx_len + 1))
        chunk = self.data[i:i + self.ctx_len + 1]
        dix = [self.stoi[s] for s in chunk]
        x = torch.tensor(dix[:-1], dtype=torch.long)
        y = torch.tensor(dix[1:], dtype=torch.long)  # targets = inputs shifted left by one
        return x, y


def start_train(module):
    """Train an RWKV language model for the given ``TModules`` row.

    Reads every file under ``module.corpus_path`` into one corpus string,
    builds a ``Dataset`` (which also persists the vocabulary and updates the
    DB row), trains via ``Trainer``, saves the final model under
    ``<module.base_path>/model/``, and finally updates the row's bookkeeping
    fields (module_path, last_train_time, train_num, status=3) and commits.

    module: TModules row carrying corpus_path, batch_size, n_epoch, ctx_len,
            module_code, base_path and epoch_save_frequency.
    """
    corpus_path = module.corpus_path
    batch_size = module.batch_size
    n_epoch = module.n_epoch
    epoch_save_path = module.base_path + 'model/'
    # exist_ok replaces the old try/except-OSError-pass idiom.
    os.makedirs(epoch_save_path, exist_ok=True)
    ctx_len = module.ctx_len
    label_id = module.module_code
    base_path = module.base_path
    epoch_save_frequency = module.epoch_save_frequency

    set_seed(42)
    np.set_printoptions(precision=4, suppress=True, linewidth=200)
    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S",
                        level=logging.INFO, )

    # ---- fixed model/optimizer hyper-parameters ----
    model_type = 'RWKV'
    datafile_encoding = 'utf-8'
    model_level = 'character'  # 'character' (recommended) or 'word'
    n_layer = 12  # try 12 for 100M, 24 for 300M
    n_head = 12  # try 12 for 100M, 16 for 300M
    n_embd = n_head * 64
    n_attn = n_embd
    n_ffn = n_embd
    lr_init = 6e-4 if model_type == 'RWKV' else 4e-4  # RWKV can use higher lr.  8e-4 = 0.0008   4e-4 = 0.0004
    lr_final = 4e-5
    betas = (0.9, 0.99)  # both branches of the old model_type conditional were identical
    eps = 4e-9
    weight_decay = 0 if model_type == 'RWKV' else 0.01  # wd is not useful when we have enough data
    epoch_length_fixed = 10000  # make an 'epoch' very short, so we can see the training progress
    rwkv_emb_scale = 0.4  # scale of initial embedding. 0.4 is a good choice
    rwkv_tiny_attn = 0  # extra tiny attention dim, useful for long ctx char-level english
    rwkv_tiny_head = 1  # 1 is good enough. 8 is slow

    # Concatenate every corpus file into one string (join avoids quadratic +=).
    parts = []
    for file_path in tqdm(os.listdir(corpus_path)):
        file = os.path.join(corpus_path, file_path)
        with open(file, "r", encoding=datafile_encoding) as reader:
            parts.append(reader.read())
    datafile = ''.join(parts)

    train_dataset = Dataset(datafile, model_level, ctx_len,
                            epoch_length_fixed, label_id=label_id, base_path=base_path)

    model = GPT(GPTConfig(train_dataset.vocab_size, train_dataset.ctx_len, model_type=model_type,
                          rwkv_emb_scale=rwkv_emb_scale, rwkv_tiny_attn=rwkv_tiny_attn, rwkv_tiny_head=rwkv_tiny_head,
                          n_layer=n_layer, n_head=n_head, n_embd=n_embd, n_attn=n_attn, n_ffn=n_ffn))

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)

    print('model', model_type, 'epoch', n_epoch, 'batchsz', batch_size, 'betas', betas, 'eps', eps, 'wd', weight_decay,
          'ctx', ctx_len, 'layer', n_layer, 'head', n_head, 'embd', n_embd, 'attn', n_attn, 'ffn', n_ffn)
    tconf = TrainerConfig(model_type=model_type, max_epochs=n_epoch, batch_size=batch_size, weight_decay=weight_decay,
                          learning_rate=lr_init, lr_decay=True, lr_final=lr_final, betas=betas, eps=eps,
                          warmup_tokens=0, final_tokens=n_epoch * len(train_dataset) * ctx_len, num_workers=0,
                          epoch_save_frequency=epoch_save_frequency, epoch_save_path=epoch_save_path)
    trainer = Trainer(model, train_dataset, None, tconf, label_id)

    trainer.train()

    # Persist the final model and record the training outcome on the module row.
    model_save = epoch_save_path + 'trained-' + trainer.get_run_name() + '-' + datetime.datetime.today().strftime(
        '%Y-%m-%d-%H-%M-%S') + '.pth'
    torch.save(model, model_save)
    module.module_path = model_save
    module.last_train_time = datetime.datetime.now()
    module.train_num = module.train_num + 1
    module.status = 3  # presumably "trained" — TODO confirm status-code meaning
    db.session.commit()
