import torch
import argparse
from cs336_basics.LM import TransformerLM, AdamW, compute_cross_entropy, load_data, load_checkpoint, save_checkpoint
from cs336_basics.my_tokenizer import Tokenizer
import wandb
import numpy as np
import os
from loguru import logger as console_logger
from tqdm import tqdm
from einops import repeat, rearrange
import sys
from collections import defaultdict


def parse_args():
    """Parse command-line arguments for LM training.

    Returns:
        argparse.Namespace with training, model, optimizer, logging,
        data, and tokenizer settings.
    """
    parse = argparse.ArgumentParser()
    # Training hyperparameters
    parse.add_argument('--epoch', default=1, type=int)
    parse.add_argument('--batch_size', default=32, type=int)

    # Transformer architecture
    parse.add_argument('--d_model', default=512, type=int)
    parse.add_argument('--num_heads', default=16, type=int)
    parse.add_argument('--num_layers', default=4, type=int)
    parse.add_argument('--d_ff', default=1344, type=int)
    parse.add_argument('--context_length', default=256, type=int)
    parse.add_argument('--rope_theta', default=10000.0, type=float)
    parse.add_argument('--vocab_size', default=10_000, type=int)

    # Optimizer (AdamW) settings
    parse.add_argument('--lr', default=1e-4, type=float)
    parse.add_argument('--weight_decay', default=0.01, type=float)
    parse.add_argument('--beta1', default=0.9, type=float)
    # NOTE(review): beta2=0.9 is unusually low for AdamW (0.999 or 0.95 is
    # typical); kept as-is to preserve existing behavior — confirm intent.
    parse.add_argument('--beta2', default=0.9, type=float)
    parse.add_argument('--adamW_eps', default=1e-8, type=float)

    # Logging settings
    # BUG FIX: the old `type=list[str]` made argparse call list() on the raw
    # string, splitting e.g. 'wandb' into ['w','a','n','d','b'].  nargs='+'
    # parses `--report_to wandb console` into a proper list of strings; the
    # default is unchanged.
    parse.add_argument('--report_to', default=['wandb', 'console'], nargs='+')
    parse.add_argument('--log_interval', default=100, type=int)

    parse.add_argument('--use_cuda', action='store_true')
    parse.add_argument('--checkpoint_dir', default='./saved', type=str)

    # Training data
    parse.add_argument('--train_data_path', type=str, default='data/TinyStoriesV2-GPT4-train.txt')
    parse.add_argument('--val_ratio', default=0.05, type=float)

    # Tokenizer files directory (expects vocab.json and merges.txt)
    parse.add_argument('--tokenizer_path', default='./data', type=str)
    args = parse.parse_args()

    return args


class MyDataLoader:
    """Serves a raw byte file as fixed-size chunks, split into train/val.

    The file is memory-mapped so large corpora never have to fit in RAM.
    The first ``val_ratio`` fraction of whole chunks forms the validation
    split; the remainder is the training split.  Any trailing partial
    chunk is dropped.
    """

    def __init__(self, data_path: str, chunk_size: int, val_ratio: float):
        # Memory-map the raw bytes (read-only) instead of reading eagerly.
        self.fp = np.memmap(data_path, mode='r')
        self.data_path = data_path
        self.chunk_size = chunk_size
        # Count of complete chunks available in the file.
        self.max_index = self.fp.size // chunk_size
        self.avail_index = list(range(self.max_index))

        n_val = int(len(self.avail_index) * val_ratio)
        # Leading chunks -> validation; the rest -> training.
        self.val_index = self.avail_index[:n_val]
        self.train_index = self.avail_index[n_val:]

    def _read_chunk(self, idx: int) -> bytes:
        """Copy chunk ``idx`` out of the memmap as an immutable bytes object."""
        start = idx * self.chunk_size
        return bytes(self.fp[start:start + self.chunk_size])

    def get_train(self):
        """Yield each training chunk as bytes, in index order."""
        return (self._read_chunk(i) for i in self.train_index)

    def get_val(self):
        """Yield each validation chunk as bytes, in index order."""
        return (self._read_chunk(i) for i in self.val_index)

class MyLogger:
    """Fan-out logger that averages metric series and reports them to the
    console (loguru) and/or Weights & Biases, per the ``report_to`` list."""

    def __init__(self, report_to: list[str], args: argparse.Namespace):
        """Configure sinks.

        Args:
            report_to: Any subset of {'console', 'wandb'}.
            args: Full run config, forwarded to wandb.init for bookkeeping.

        Raises:
            NotImplementedError: For an unrecognized sink name.
        """
        self.console_logger = None
        self.wandb_logger = None
        for item in report_to:
            if item == 'console':
                self.console_logger = console_logger
            elif item == 'wandb':
                # NOTE(review): project name 'trian-lm' looks like a typo for
                # 'train-lm' — left unchanged in case the wandb project already
                # exists under this name; confirm before renaming.
                self.wandb_logger = wandb.init(entity='lioner-kiss', project='trian-lm', config=args)
            else:
                raise NotImplementedError(f'unknown report target: {item!r}')

    def log(self, info_dict: dict) -> dict:
        """Average each metric series and emit the result to every sink.

        Args:
            info_dict: Mapping of metric name -> list of per-step values.

        Returns:
            The dict of rounded means that was logged.
        """
        # BUG FIX: skip empty series — sum(v)/len(v) raised ZeroDivisionError
        # when a metric list was empty.
        mean_dict = {
            k: round(sum(v) / len(v), 2)
            for k, v in info_dict.items()
            if len(v) > 0
        }
        if self.console_logger is not None:
            self.console_logger.info(mean_dict)
        if self.wandb_logger is not None:
            self.wandb_logger.log(mean_dict)
        return mean_dict



def train(args: argparse.Namespace):
    """End-to-end training loop.

    Streams raw byte chunks from the training file, tokenizes each chunk,
    samples (source, target) batches via load_data, and optimizes a
    TransformerLM with AdamW, logging running-mean stats every
    ``log_interval`` steps.

    Args:
        args: Command-line namespace produced by parse_args().
    """

    device = 'cuda:0' if torch.cuda.is_available() and args.use_cuda else 'cpu'
    # Bytes read from disk per step.  The *8 factor presumably over-provisions
    # text so the tokenizer yields enough tokens for a full
    # (batch_size, context_length) batch — TODO confirm against load_data.
    chunk_size = args.context_length * args.batch_size * 8

    tokenizer = Tokenizer.from_files(os.path.join(args.tokenizer_path, 'vocab.json'), os.path.join(args.tokenizer_path, 'merges.txt'))
    model = TransformerLM(args.vocab_size, args.context_length, args.d_model, args.num_layers, args.num_heads, args.d_ff, args.rope_theta).to(device)
    optimizer = AdamW(model.parameters(), args.lr, args.weight_decay, (args.beta1, args.beta2), args.adamW_eps)
    dataloader = MyDataLoader(args.train_data_path, chunk_size=chunk_size, val_ratio=args.val_ratio)
    # NOTE(review): dataloader.get_val() is never consumed, so no validation
    # pass runs; save_checkpoint and args.checkpoint_dir are likewise unused,
    # so no checkpoints are written.  Both look like unfinished work.

    logger = MyLogger(args.report_to, args)
    with tqdm(total=args.epoch * dataloader.max_index, mininterval=0.3) as pbar:
        step = 0
        stats = defaultdict(list)  # metric name -> list of per-step values
        for i in range(args.epoch):
            for chunk_bytes in dataloader.get_train():
                # Decode raw bytes to text, dropping any invalid UTF-8.
                chunk_str = chunk_bytes.decode('utf-8', errors='ignore')
                # Second positional arg presumably toggles special-token
                # handling in the project tokenizer — TODO confirm.
                token_ids = tokenizer.encode(chunk_str, True)
                source, target = load_data(token_ids, args.batch_size, args.context_length, device=device)
                # Causal attention mask: triu(ones, diagonal=1) == 0 is True on
                # and below the diagonal, so each position attends only to
                # itself and earlier positions.  The rearrange produces
                # position ids of shape (1, seq).  Both are rebuilt every step
                # even though they depend only on the batch/sequence shape.
                logits = model(source, torch.triu(torch.ones((source.shape[0], source.shape[1], source.shape[1]), device=device), diagonal=1) == 0, 
                            rearrange(torch.arange(args.context_length, device=device), "seq -> 1 seq"))
                loss = compute_cross_entropy(logits, target)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                
                del chunk_bytes  # release the raw bytes promptly to cap memory
                stats['loss'].append(loss.item())
                pbar.update(1)
                step += 1

                # Emit the mean of accumulated stats every log_interval steps.
                if step % args.log_interval == 0:
                    logger.log(stats)
                    stats.clear()


if __name__ == '__main__':
    # BUG FIX: the script used to force-append '--use_cuda' to sys.argv,
    # which made the flag impossible to disable from the command line and
    # defeated the argparse option entirely.  Respect the user's actual
    # arguments instead; pass --use_cuda explicitly to train on GPU.
    args = parse_args()
    train(args)