import json
import math
import os

import numpy as np
import torch
import logging
import pandas as pd
from sklearn.metrics import confusion_matrix
from torch import nn
from torch.utils.data import DataLoader
from MyUtil import try_gpu, get_save_dir, get_best_file, CustomJSONEncoder
from MyDataset import WindIcingDatasetV1, MyDataloader, WindIcingDatasetV2, SelectedFeatures, WindIcingDatasetV3, \
    WindIcingDatasetV4
from sklearn.model_selection import train_test_split
from pathlib import Path
from MyUtil import check_and_create_path, MyLoggerV1, get_logger, metrics_function_v1
from MyModel import WindIcingModelV1, WindIcingModelV2, WindIcingModelV3, WindIcingModelV4
from MyTrainer import WindIcingTrainerV1, WindIcingTrainerV2, WindIcingTrainerV3, WindIcingTrainerV4, WindIcingTrainerV5


def get_dataset(config):
    """Build the train/valid/test dataloaders described by ``config``.

    Side effects on ``config``:
      * ``config["data_dict"]`` is set to the split sizes and dataset class.
      * ``config["feature_dim"]`` is set from the last axis of one input batch.

    Returns:
        (train_loader, valid_loader, test_loader)

    Raises:
        ValueError: on an unknown ``dataset_v``, ``num`` or ``experiment_type``.
    """
    # Dataset version -> dataset class.
    dataset_classes = {
        1: WindIcingDatasetV1,
        2: WindIcingDatasetV2,
        3: WindIcingDatasetV3,
        4: WindIcingDatasetV4,
    }
    dataset_v = config['dataset_v']
    try:
        dataset_class = dataset_classes[dataset_v]
    except KeyError:
        raise ValueError(f'unknown dataset_v: {dataset_v}') from None

    # Selected feature columns.
    select_columns = config["select_columns"]
    experiment_type = config["experiment_type"]
    if experiment_type == "one":
        # Train/validate on source "num1", test on the other one.
        # 15 and 21 are the only two valid values (presumably site/farm ids
        # understood by MyDataloader -- confirm against MyDataset).
        num1 = config["num"]
        if num1 == 15:
            num2 = 21
        elif num1 == 21:
            num2 = 15
        else:
            raise ValueError(f'config["num"] must be 15 or 21, got {num1!r}')

        (train_loader, train_size), (valid_loader, valid_size), _ = MyDataloader.get_dataloader_one(
            dataset_class, config, num1, select_columns=select_columns, filter=config["filter"])
        # test_size=0 -> the whole of the second source becomes the test split.
        _, _, (test_loader, test_size) = MyDataloader.get_dataloader_one(
            dataset_class, config, num2, select_columns=select_columns, filter=config["filter"], test_size=0)
    elif experiment_type == "union":
        (train_loader, train_size), (valid_loader, valid_size), (
            test_loader, test_size) = MyDataloader.get_dataloader_union(
            dataset_class, config, select_columns=select_columns, filter=config["filter"])
    elif experiment_type == "ratio":
        (train_loader, train_size), (valid_loader, valid_size), (
            test_loader, test_size) = MyDataloader.get_dataloader_ratio(
            dataset_class, config, train_size_15=config['train_size_15'], train_size_21=config['train_size_21'],
            test_size_21=config['test_size_21'],
            select_columns=select_columns, filter=config["filter"]
        )
    else:
        raise ValueError(f'unknown experiment_type: {experiment_type!r}')

    # Record split sizes so they end up in the saved config.json.
    config["data_dict"] = dict(
        train_size=train_size,
        valid_size=valid_size,
        test_size=test_size,
        dataset_class=dataset_class,
    )
    # Infer the per-step feature dimension from the first batch.
    (inputs, _), _ = next(iter(train_loader))
    config['feature_dim'] = inputs.shape[-1]
    return train_loader, valid_loader, test_loader
        
def get_model(config, train_loader):
    """Instantiate the model selected by ``config['model_v']``.

    The model is moved to ``config['device']``, run once in eval mode on a
    single batch with ``debug=True`` (presumably to log intermediate shapes --
    confirm in MyModel), then Xavier-initialized.

    Returns:
        The initialized ``nn.Module``.

    Raises:
        ValueError: on an unknown ``model_v``.
        AssertionError: on a model/dataset version mismatch.
    """
    v = config['model_v']
    device = config['device']
    if v == 1:
        model = WindIcingModelV1(
            seq_len=config['seq_len'],
            window_size=config['window_size'],
            feature_dim=config['feature_dim'],
            device=device,
        )
    elif v in (2, 3, 4):
        if v == 3:
            assert config["dataset_v"] == 3, "模型3只有和数据集3才能匹配"
        elif v == 4:
            assert config["dataset_v"] == 4, "模型4只有和数据集4才能匹配"
        # V2/V3/V4 share the exact same constructor signature.
        model_class = {2: WindIcingModelV2, 3: WindIcingModelV3, 4: WindIcingModelV4}[v]
        mc = config['model_config']
        model = model_class(
            seq_len=config['seq_len'], window_size=config['window_size'], feature_dim=config['feature_dim'],
            h_dim=mc['h_dim'],
            d_model1=mc['d_model1'], n_head1=mc['n_head1'],
            dim_feedforward1=mc['dim_feedforward1'],
            num_layers1=mc['num_layers1'],
            d_model2=mc['d_model2'], n_head2=mc['n_head2'],
            dim_feedforward2=mc['dim_feedforward2'],
            num_layers2=mc['num_layers2'],
            dropout=mc['dropout'],
            device=device
        )
    else:
        raise ValueError(f'unknown model_v: {v}')

    model.to(device)
    model.eval()
    # Debug forward pass on one batch; the batch layout depends on the
    # dataset version paired with this model version.
    with torch.no_grad():
        for (inputs, extra), _ in train_loader:
            inputs = inputs.to(device)
            if v in (1, 2):
                model.my_forward(inputs, debug=True)
            elif v == 3:
                # Dataset V3 yields an auxiliary power tensor.
                model.my_forward(inputs, extra.to(device), debug=True)
            else:  # v == 4: dataset V4 yields (powers, power_features).
                powers, power_features = extra
                model.my_forward(inputs, (powers.to(device), power_features.to(device)), debug=True)
            break

    logger = get_logger(config["no"])
    logger.info(f'Model Type: {model.__class__}')

    def init_weights(module):
        # Xavier-init every weight matrix (dim > 1 skips biases/LayerNorm scales).
        if hasattr(module, 'weight') and module.weight.dim() > 1:
            nn.init.xavier_uniform_(module.weight)

    model.apply(init_weights)
    return model
    
def get_trainer(model, config, train_loader, valid_loader):
    """Instantiate the trainer selected by ``config['trainer_v']``.

    All trainer versions share one constructor signature; this function only
    validates trainer/model/dataset version compatibility and picks the class.

    Returns:
        The constructed trainer.

    Raises:
        ValueError: on an unknown ``trainer_v``.
        AssertionError: on a trainer/model/dataset version mismatch.
    """
    dataset_v = config['dataset_v']
    model_v = config['model_v']
    trainer_v = config["trainer_v"]

    optimizer = config["optimizer"]
    criterion = config["criterion"]
    device = config['device']

    if trainer_v == 1:
        assert model_v == 1, "训练器1只能和模型1匹配"
        trainer_class = WindIcingTrainerV1
    elif trainer_v == 2:
        assert model_v == 2, "训练器2只能和模型2匹配"
        trainer_class = WindIcingTrainerV2
    elif trainer_v == 3:
        assert model_v == 3, "训练器3只能和模型3匹配"
        assert dataset_v == 3, "训练器3只能和数据集3匹配"
        trainer_class = WindIcingTrainerV3
    elif trainer_v == 4:
        assert model_v == 4, "训练器4只能和模型4匹配"
        assert dataset_v == 4, "训练器4只能和数据集4匹配"
        trainer_class = WindIcingTrainerV4
    elif trainer_v == 5:
        # Trainer V5 is an alternative training strategy for the V3 model/dataset.
        assert model_v == 3, "训练器5只能和模型3匹配"
        assert dataset_v == 3, "训练器5只能和数据集3匹配"
        trainer_class = WindIcingTrainerV5
    else:
        raise ValueError(f'unknown trainer_v: {trainer_v}')

    trainer = trainer_class(
        model=model,
        train_loader=train_loader,
        valid_loader=valid_loader,
        optimizer=optimizer,
        criterion=criterion,
        config=config,
        device=device,
        metrics_function=metrics_function_v1,
    )
    logger = get_logger(config["no"])
    logger.info(f'Trainer Type: {trainer.__class__}')
    return trainer

def save_config_json(config):
    """Serialize ``config`` to ``../logs/<no>/config.json``.

    CustomJSONEncoder handles the non-JSON-native values stored in the config
    (model classes, optimizer, criterion, device, ...).
    """
    no = config["no"]
    json_path = f'../logs/{no}/config.json'
    # ensure_ascii=False writes the Chinese comment strings verbatim, so the
    # file must be opened as UTF-8 explicitly (platform default may differ).
    with open(json_path, 'w', encoding='utf-8') as file:
        json.dump(config, file, indent=4, cls=CustomJSONEncoder, ensure_ascii=False)

no = 62

if __name__ == '__main__':
    # Experiment configuration; persisted to ../logs/<no>/config.json below.
    config = dict(
        no=no,
        comment='V3数据集，预测风速 + 机理约束（结冰时预测值大于真实值）,原始特征',
        comment2='15 - 21 ',

        # Dataset / model / trainer versions, experiment scenario and features.
        dataset_v=3,
        model_v=3,
        trainer_v=5,
        experiment_type="ratio", num=15, filter=True,
        # Selected feature columns.
        select_columns=SelectedFeatures.original_columns,

        # Dataset construction settings.
        batch_size=256, shuffle=True,
        seq_len=10, seq_step_size=10, window_size=64, step_size=1,
        # Split ratios used by the "ratio" experiment type.
        train_size_15=0.8, train_size_21=0.2, test_size_21=0.5,

        # Training settings.
        epochs=300, lr=0.0001,
        checkpoint_step=25,
        warmup_epochs=15, warmup_loss=0.01,
        early_stopping=True, patience=30,
        resume=False, resume_path=f'../checkpoints/{no}/best.pth',
        power_lambda=0.5,

        # Run the test phase after training.
        tester=True,

        # Model hyper-parameters.
        model_config=dict(
            h_dim=128,
            d_model1=256, n_head1=4, dim_feedforward1=256, num_layers1=4,
            d_model2=256, n_head2=4, dim_feedforward2=256, num_layers2=4,
            dropout=0.1,
        ),

        # Hardware.
        gpu_no=0
    )
    config['save_dir'] = get_save_dir(no)
    os.makedirs(config['save_dir'], exist_ok=True)

    logger = get_logger(config['no'])

    device = try_gpu(config["gpu_no"])
    config["device"] = device
    logger.info(f'training on device: {device}')

    train_loader, valid_loader, test_loader = get_dataset(config)

    logger.info(config)

    model = get_model(config, train_loader)
    optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
    criterion = torch.nn.CrossEntropyLoss(reduction="sum")

    config["model_name"] = model.__class__.__name__
    config["optimizer"] = optimizer
    config["criterion"] = criterion

    trainer = get_trainer(model, config, train_loader, valid_loader)

    # Save the config before training so the run is documented even if it crashes.
    save_config_json(config)

    trainer.train()

    if config["tester"]:
        # Reload the best checkpoint and evaluate on the held-out test split.
        checkpoint_path = get_best_file(no)
        logger.info(f'load checkpoint from {checkpoint_path}')
        # map_location keeps loading robust if the checkpoint was saved on a
        # device that is unavailable here (e.g. a different GPU index).
        checkpoint = torch.load(checkpoint_path, map_location=device)

        model.load_state_dict(checkpoint['state_dict'])

        trainer.test(test_loader=test_loader)