import argparse
import json
import socketserver
import sys
import threading
from io import StringIO
from threading import Thread

import numpy as np
import torch
import os
import pytorch_lightning as pl
from pytorch_lightning.tuner import Tuner

import wandb
from pytorch_lightning import Trainer, Callback
from pytorch_lightning.callbacks import ModelCheckpoint, TQDMProgressBar
from pytorch_lightning.callbacks.progress.tqdm_progress import Tqdm
from pytorch_lightning.loggers import WandbLogger, TensorBoardLogger

from config.config import Config
from data.data import ImcsDacDataModule
from models.imcs_dac import ImcsDacModule

class MyTQDMProgressBar(TQDMProgressBar):
    """Progress bar whose validation bar is pinned to row 0 and left on screen.

    Only ``init_validation_tqdm`` is overridden; everything else is inherited.
    (The redundant ``__init__`` that merely called ``super().__init__()`` has
    been removed — the inherited constructor is used as-is.)
    """

    def init_validation_tqdm(self):
        """Build the tqdm instance used for the validation loop."""
        return Tqdm(
            desc=self.validation_description,
            position=0,  # pin to row 0 (fixed position)
            disable=self.is_disabled,
            leave=True,  # keep the finished bar visible instead of clearing it
            dynamic_ncols=True,
            file=sys.stdout,
        )


class LearningRateMonitor(Callback):
    """Callback that prints the distinct optimizer learning rates after each training epoch."""

    def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        # Gather the 'lr' value of every param group of every optimizer.
        rates = []
        for optimizer in trainer.optimizers:
            for group in optimizer.param_groups:
                rates.append(group['lr'])

        # Flatten and collapse duplicates (np.unique also sorts the values).
        lr = np.unique(np.reshape(rates, (-1)))

        print(f"lr = {lr}")


class DelayModelCheckpoint(ModelCheckpoint):
    """``ModelCheckpoint`` variant that suppresses saving during the first epochs.

    Args:
        delay_epochs: number of initial epochs during which no checkpoint is
            written; remaining positional/keyword args go to ``ModelCheckpoint``.
    """

    def __init__(self, delay_epochs: int, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.delay_epochs = delay_epochs

    def _should_skip_saving_checkpoint(self, trainer: "pl.Trainer") -> bool:
        # Skip while still inside the warm-up window, otherwise defer to the
        # parent's regular skip logic.
        inside_warmup = trainer.current_epoch < self.delay_epochs
        return inside_warmup or super()._should_skip_saving_checkpoint(trainer)


class MyServer(socketserver.BaseRequestHandler):
    """Tiny line-oriented remote-eval handler for live debugging of a training run.

    Protocol (as implemented here): every received chunk is appended to a text
    buffer; a chunk consisting of the single character "." executes the buffered
    text with ``exec`` and sends back whatever the code printed.

    SECURITY: this executes arbitrary Python received over the socket. It must
    only ever be bound to localhost for interactive debugging (see the
    commented-out ``ThreadingTCPServer`` setup in ``__main__``).
    """

    def handle(self):
        buffer = ""

        while True:
            try:
                # Receive one chunk; an empty result means the peer closed.
                packet = self.request.recv(1024)
                if not packet:
                    break

                message = packet.decode()
                if len(message) == 0:
                    continue

                if message == ".":
                    # Redirect stdout so the exec'd code's output can be
                    # captured and sent back to the client.
                    temp_output = StringIO()
                    sys.stdout = temp_output
                    try:
                        # '#' appears to be a transport escape for spaces used
                        # by the client -- TODO confirm. exec runs UNTRUSTED
                        # network input; see class docstring.
                        exec(buffer.replace('#', ' '))
                    finally:
                        sys.stdout = sys.__stdout__
                        buffer = ""

                    response = temp_output.getvalue()
                    self.request.sendall(str(response + "\n").encode())
                else:
                    buffer = buffer + message + "\r\n"

            except OSError:
                # The socket itself failed: recv/send would keep failing, so
                # stop instead of spinning in a tight error loop (the original
                # blanket handler retried recv() on a dead socket forever).
                sys.stdout = sys.__stdout__
                break
            except Exception as e:
                # Error raised by the exec'd code (or decode): report it
                # locally and keep the session alive for the next command.
                sys.stdout = sys.__stdout__
                print(e)


if __name__ == '__main__':
    config = Config()

    # CLI overrides for the tunable hyper-parameters; defaults come from Config.
    parser = argparse.ArgumentParser(prog='train')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=config.gradient_accumulation_steps)
    parser.add_argument('--learning_rate', type=float, default=config.learning_rate)
    parser.add_argument('--num_attention_heads', type=int, default=config.num_attention_heads)
    parser.add_argument('--num_attention_layers', type=int, default=config.num_attention_layers)
    parser.add_argument('--num_feed_forward_layers', type=int, default=config.num_feed_forward_layers)
    parser.add_argument('--optimizer', default=config.optimizer)
    parser.add_argument('--position_embedding_type', default=config.position_embedding_type)
    parser.add_argument('--weight_decay', type=float, default=config.weight_decay)
    args = parser.parse_args()

    # Fold the CLI values back into the config BEFORE any derived quantity is
    # computed. (Previously steps_per_epoch was computed from the stale default
    # gradient_accumulation_steps, silently ignoring the CLI override.)
    config.gradient_accumulation_steps = args.gradient_accumulation_steps
    config.learning_rate = args.learning_rate
    config.num_attention_heads = args.num_attention_heads
    config.num_attention_layers = args.num_attention_layers
    config.num_feed_forward_layers = args.num_feed_forward_layers
    config.optimizer = args.optimizer
    config.position_embedding_type = args.position_embedding_type
    config.weight_decay = args.weight_decay

    torch.set_float32_matmul_precision('high')
    datamodule = ImcsDacDataModule(config, tokenizer=config.tokenizer)

    config.label_weights = datamodule.label_weights().to(config.device)
    config.label_smoothing = 0.1
    config.num_train_samples = len(datamodule.train_dataset)
    # NOTE(review): this divides only by the accumulation steps, not by the
    # batch size -- confirm that is what the downstream schedulers expect.
    config.steps_per_epoch = config.num_train_samples / config.gradient_accumulation_steps

    # Build the module once; update_save_path derives the run directory from it.
    # (Previously an identical second instance was constructed in an else branch.)
    module = ImcsDacModule(config)
    config.update_save_path(module)

    checkpoint = DelayModelCheckpoint(delay_epochs=100,
                                      dirpath=config.save_path,
                                      save_on_train_epoch_end=True,
                                      every_n_epochs=1,
                                      save_last=True,
                                      monitor='val_loss',
                                      mode='min',
                                      save_top_k=3,
                                      filename='{epoch}-{val_loss:.3f}-{val_acc:.3f}',
                                      auto_insert_metric_name=True)

    # "model" resume mode reloads the weights from the newest checkpoint now;
    # "checkpoint" mode instead hands the path to Trainer.fit below so the
    # full trainer state (optimizer, epoch counter, ...) is restored.
    ckpt_path = config.find_last_ckpt_path()
    if config.train_resume_mode == "model" and ckpt_path is not None:
        module = ImcsDacModule.load_from_checkpoint(ckpt_path, config=config)

    # logger = TensorBoardLogger('logs/baseline', name='weight_declay=1e-3')
    logger = WandbLogger(name="baseline", project="imcs-dac")

    trainer = Trainer(
        accelerator=config.device,
        max_epochs=config.num_epochs,
        accumulate_grad_batches=config.gradient_accumulation_steps,
        num_sanity_val_steps=0,
        enable_checkpointing=True,
        default_root_dir=config.save_path,
        logger=logger,
        callbacks=[LearningRateMonitor(), checkpoint, MyTQDMProgressBar()]
    )

    # Optional remote-eval debug server (binds localhost only; see MyServer).
    # server = socketserver.ThreadingTCPServer(('localhost', 8989), MyServer)
    # threading.Thread(target=lambda: server.serve_forever()).start()

    trainer.fit(module, datamodule=datamodule, ckpt_path=ckpt_path if config.train_resume_mode == "checkpoint" else None)
