import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse

import random
import os
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

from model.S4Max import S4Max


# Use the GPU when available; tensors and the model are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Seed Python's RNG so the random data split/sampling is reproducible.
# NOTE(review): numpy and torch RNGs are NOT seeded here — confirm whether
# full training reproducibility is expected.
random.seed(42)

# # Columns 1-24 of each row:
# 1  year, e.g. 2007, 2008
# 2  month, e.g. 1, 2, 3; valid range 1-12
# 3  day, e.g. 1, 2, 3; valid range 1-31
# 4  hour, e.g. 1.3, 3.4; valid range 0-24
# 5  geographic latitude, e.g. -30, 30, 60; valid range -90 to 90
# 6  geographic longitude, e.g. -120, 120; valid range -180 to 180
# 7  geographic altitude, e.g. 80 km, 100 km; valid range 80-130
# ###
# # The first 7 features are present for every sample (never NaN).
# # The remaining features are satellite-measured wind speeds at different
# # altitudes and may contain NaN.

# 8  wind speed at 91.33 km altitude, e.g. 0.1 m/s, -0.2 m/s
# 9  wind speed at 94.27 km altitude
# 10-23  wind speeds at progressively higher altitudes
# ###
# # The altitudes (km) for features 8-23 are: [ 91.3341588 94.27706017 97.21078712 100.13536825 103.05083208 105.95720702 108.85452142 111.74280351 114.62208146 117.49238334 120.35373713 123.20617072 126.04971193 128.88438847 131.71022798 134.52725801]
# # Features 8-23 may contain NaN; at the higher altitudes many values are NaN.

# 24  S4max: the satellite-observed value to predict (the label). It is the
#     S4max index characterizing ionospheric electron fluctuations, range 0-5.

# set parameters
def configs():
    """Build and return the argparse parser with data paths, split ratios,
    and training/model hyper-parameters."""
    parser = argparse.ArgumentParser()

    # data paths (do not change)
    for flag, default in (
        ("--data_path", "all_data_pack.npy"),
        ("--mask_path", "all_mask_pack.npy"),
        ("--train_data_path", "train_data.npy"),
        ("--val_data_path", "val_data.npy"),
        ("--test_data_path", "test_data.npy"),
        ("--train", "train"),
    ):
        parser.add_argument(flag, type=str, default=default)

    # dataset split ratios
    for flag, default in (
        ("--train_ratio", 0.8),
        ("--val_ratio", 0.1),
        ("--test_ratio", 0.1),
    ):
        parser.add_argument(flag, type=float, default=default)

    # training & model
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--epochs", type=int, default=100)
    parser.add_argument("--lr", type=float, default=1e-4)

    parser.add_argument("--ts_input", type=int, default=8, help="dimension of time and space feature.")
    parser.add_argument("--vel_input", type=int, default=1, help="dimension of wind velocity feature.")
    parser.add_argument("--times_num", type=int, default=16, help="wind velocity meausred at 16 different heights.")
    parser.add_argument("--modal_nums", type=int, default=2, help="the number of models.")

    parser.add_argument("--d_model", type=int, default=256, help="the dimension after embedding.")
    parser.add_argument("--dff", type=int, default=512, help="the number of the units.")
    parser.add_argument("--num_heads", type=int, default=8, help="number of the heads of the multi-head model.")
    parser.add_argument("--num_layers", type=int, default=1, help="the number of attention layers.")
    parser.add_argument("--num_bnks", type=int, default=9, help="")
    parser.add_argument("--bnks_layers", type=int, default=9, help="")

    return parser

# Dataset loading
class S4MaxDataset(Dataset):
    """Dataset of satellite observations.

    Each row has 24 columns: 7 spatio-temporal features (year, month, day,
    hour, latitude, longitude, altitude — always present), 16 wind-speed
    measurements at increasing altitudes (may contain NaN), and the S4max
    label in the last column.
    """

    def __init__(self, train="train", data_path="all_data_pack.npy"):
        super(S4MaxDataset, self).__init__()
        # `train` selects a pre-split file; any other value falls back to
        # the explicit `data_path` argument.
        if train == "train":
            data_path = "train_data.npy"
        elif train == "val":
            data_path = "val_data.npy"
        elif train == "test":
            data_path = "test_data.npy"
        self.data = np.load(data_path)

        # Replace NaN (missing wind measurements) with 0.
        # NOTE(review): this runs BEFORE the per-column min/max below, so a
        # column containing NaN gets its min/max pulled toward 0 — confirm
        # this is the intended normalization reference.
        self.data = np.nan_to_num(self.data, nan=0.0)
        self.num_samples = self.data.shape[0]   # e.g. 47033
        self.num_features = self.data.shape[1]  # 24
        self.max_data, self.min_data = np.max(self.data, axis=0), np.min(self.data, axis=0)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        data_index = self.data[index]

        # Spatio-temporal features -> 8-dim vector, each component in [0, 1].
        year, month, day, hour, lat, lon, alt = data_index[:7]
        day_of_year = pd.Timestamp(year=int(year), month=int(month), day=int(day)).dayofyear
        # Cyclic sin/cos encodings, rescaled from [-1, 1] into [0, 1].
        day_sin = 0.5 * np.sin(2 * np.pi * day_of_year / 365) + 0.5
        day_cos = 0.5 * np.cos(2 * np.pi * day_of_year / 365) + 0.5
        hour_sin = 0.5 * np.sin(2 * np.pi * hour / 24) + 0.5
        hour_cos = 0.5 * np.cos(2 * np.pi * hour / 24) + 0.5
        # BUG FIX: was (lat + min) — min-max normalization subtracts the
        # minimum, matching the altitude normalization below.
        lat = (lat - self.min_data[4]) / (self.max_data[4] - self.min_data[4])
        # NOTE(review): longitude spans -180..180, so 2*pi*lon/180 has a
        # period of 180 degrees and aliases antipodal longitudes; the usual
        # encoding divides by 360 — confirm intent before changing.
        lon_sin = 0.5 * np.sin(2 * np.pi * lon / 180) + 0.5
        lon_cos = 0.5 * np.cos(2 * np.pi * lon / 180) + 0.5
        alt = (alt - self.min_data[6]) / (self.max_data[6] - self.min_data[6])
        time_space = torch.tensor([day_sin, day_cos, hour_sin, hour_cos, lat, lon_sin, lon_cos, alt], dtype=torch.float32)

        # Wind speeds (columns 7..22), min-max normalized per column.
        wind = torch.tensor(data_index, dtype=torch.float32)[7:-1]
        wind = (wind - self.min_data[7:-1]) / (self.max_data[7:-1] - self.min_data[7:-1])

        # Label: log(1 + S4max) compresses the 0-5 range.
        y = np.log(data_index[-1] + 1)
        y = torch.tensor(y, dtype=torch.float32)

        return time_space, wind, y


# Split the dataset
def split_data(args):
    """Randomly split the full dataset into train/val/test .npy files.

    Reads `args.data_path` / `args.mask_path`, shuffles row indices, slices
    them by `args.train_ratio` / `args.val_ratio` / `args.test_ratio`, and
    saves the three data subsets to the configured output paths.
    """
    data_path = args.data_path
    mask_path = args.mask_path
    train_ratio = args.train_ratio
    val_ratio = args.val_ratio
    test_ratio = args.test_ratio
    train_data_path = args.train_data_path
    val_data_path = args.val_data_path
    test_data_path = args.test_data_path

    # Load data from npy file
    data = np.load(data_path)
    mask = np.load(mask_path)
    assert data.shape == mask.shape   # e.g. (47033, 24)
    # BUG FIX: exact float equality (== 1.0) is fragile (e.g. 0.7+0.15+0.15
    # does not sum to exactly 1.0 in binary floating point); use a tolerance.
    assert abs(train_ratio + val_ratio + test_ratio - 1.0) < 1e-9

    # Randomly split the rows into training, validation and test sets.
    # Uses the module-seeded `random` RNG for reproducibility.
    indices = list(range(data.shape[0]))
    random.shuffle(indices)
    train_indices = indices[:int(train_ratio * len(indices))]
    val_indices = indices[int(train_ratio * len(indices)):int((train_ratio + val_ratio) * len(indices))]
    test_indices = indices[int((train_ratio + val_ratio) * len(indices)):]

    train_data = data[train_indices]
    val_data = data[val_indices]
    test_data = data[test_indices]

    # Save the split data into npy files.
    # NOTE(review): the mask is validated but not split/saved alongside the
    # data — confirm downstream code does not need per-split masks.
    np.save(train_data_path, train_data)
    np.save(val_data_path, val_data)
    np.save(test_data_path, test_data)

    print("Data shape:", data.shape)    # (47033, 24)
    print("train : val : test = {} : {} : {} = {} : {} : {}".format(
        len(train_indices), len(val_indices), len(test_indices), train_ratio, val_ratio, test_ratio))


def visualize_all_data(args):
    """Print a few randomly chosen samples/masks and plot the label histogram.

    Loads the full data and mask arrays from `args.data_path` /
    `args.mask_path`, prints 4 random rows for inspection, then shows a
    histogram of the label column (blocks on plt.show()).
    """
    data_path = args.data_path
    mask_path = args.mask_path

    # Load data from npy file
    data = np.load(data_path)
    mask = np.load(mask_path)
    # Replace every NaN with 0 (the mask marks which entries were observed).
    data = np.nan_to_num(data, nan=0.0)

    # Print the shape of the data and mask
    print("Data shape:", data.shape)    # (47033, 24)
    print("Mask shape:", mask.shape)    # (47033, 24)

    # FIX: removed a dead `if True:` wrapper that only added indentation.
    # Randomly select a few rows to inspect.
    indices = random.sample(range(data.shape[0]), 4)
    for i in indices:
        # 24 features: the first 7 are spatio-temporal info (always present),
        # features 8-23 are wind speeds at different heights (may have been
        # NaN before nan_to_num), the last one is the label.
        print("Data:", data[i])
        # Mask: True = value observed, False = missing.
        print("Mask:", mask[i])
        print("-----------" * 10)

    # Label distribution.
    label = data[:, -1]
    max_label = np.max(label)
    min_label = np.min(label)
    print("mid label : ", np.median(label))  # mid label :  0.1435077041387558
    print("label distribution : [ {} , {} ] ".format(min_label, max_label))  # label distribution : [ 0.030818700790405273 , 4.230077743530273 ]
    plt.hist(label, bins=20, rwidth=0.8)
    plt.show()


# train
def train_model(model, criterion, optimizer, train_loader, val_loader, num_epochs):
    """Train `model` for `num_epochs`, logging losses to TensorBoard.

    Runs validation every 5 epochs when `val_loader` is provided, saves the
    final weights to "model.pth", and returns the trained model.

    NOTE(review): reads the module-level globals `args` (for ts_input) and
    `device`, which are only defined under `__main__` — confirm all callers
    set them up first.
    """
    writer = SummaryWriter("logs")
    try:
        for epoch in range(num_epochs):
            model.train()
            train_loss = 0.0
            for time_space, wind, targets in train_loader:
                # Broadcast the 8-dim spatio-temporal vector across the 16
                # wind-height positions (16 mirrors args.times_num).
                time_space = time_space.reshape(-1, 1, args.ts_input)           # [batch_size, 1, 8]
                time_space = time_space.repeat(1, 16, 1).to(device).float()     # [batch_size, 16, 8]
                wind = wind.reshape(-1, 16, 1).to(device).float()               # [batch_size, 16, 1]
                targets = targets.reshape(-1, 1).to(device).float()             # [batch_size, 1]

                optimizer.zero_grad()
                pred = model(time_space, wind)
                loss = criterion(pred, targets)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()
            train_loss /= len(train_loader)
            print(f"Epoch [{epoch+1}/{num_epochs}], Train Loss: {train_loss:.6f}")
            writer.add_scalar("Loss/train", train_loss, epoch)

            # Validate every 5th epoch.
            if val_loader is not None and (epoch + 1) % 5 == 0:
                model.eval()
                val_loss = 0.0
                with torch.no_grad():
                    for time_space, wind, targets in val_loader:
                        time_space = time_space.reshape(-1, 1, args.ts_input)           # [batch_size, 1, 8]
                        time_space = time_space.repeat(1, 16, 1).to(device).float()     # [batch_size, 16, 8]
                        wind = wind.reshape(-1, 16, 1).to(device).float()               # [batch_size, 16, 1]
                        targets = targets.reshape(-1, 1).to(device).float()             # [batch_size, 1]

                        pred = model(time_space, wind)
                        loss = criterion(pred, targets)
                        val_loss += loss.item()
                val_loss /= len(val_loader)
                print(f"Epoch [{epoch+1}/{num_epochs}], Val Loss: {val_loss:.6f}")
                writer.add_scalar("Loss/val", val_loss, epoch)
    finally:
        # BUG FIX: the SummaryWriter was never closed, which can drop
        # buffered events and leaks the event-file handle.
        writer.close()

    save_path = "model.pth"
    torch.save(model.state_dict(), save_path)
    print("Finished Training!")
    return model


def test_model(model, test_loader):
    """Load "model.pth", evaluate on `test_loader`, and print the MSE
    computed in the original (de-logged) S4max scale.

    NOTE(review): reads the module-level globals `args` and `device`
    (defined under `__main__`) — confirm callers set them up first.
    """
    # BUG FIX: map_location lets a GPU-trained checkpoint load on a
    # CPU-only host instead of raising.
    model.load_state_dict(torch.load("model.pth", map_location=device))
    model.eval()
    total_sq_err = 0.0
    total_samples = 0
    with torch.no_grad():
        for time_space, wind, targets in test_loader:
            time_space = time_space.reshape(-1, 1, args.ts_input)           # [batch_size, 1, 8]
            time_space = time_space.repeat(1, 16, 1).to(device).float()     # [batch_size, 16, 8]
            wind = wind.reshape(-1, 16, 1).to(device).float()               # [batch_size, 16, 1]
            targets = targets.reshape(-1, 1).to(device).float()             # [batch_size, 1]

            pred = model(time_space, wind)
            # Undo the log(1 + y) transform applied in the dataset so the
            # error is measured in the original S4max scale.
            targets = np.exp(targets.cpu().numpy()) - 1
            pred = np.exp(pred.cpu().numpy()) - 1
            total_sq_err += np.sum((pred - targets) ** 2)
            total_samples += targets.shape[0]
    # BUG FIX: summing squared errors and dividing by the sample count gives
    # the exact dataset MSE; averaging per-batch means over-weights a
    # smaller final batch.
    print(f"Test MSE: {total_sq_err / total_samples:.6f}")


def main(args, train=True):
    """Build the model, optionally train it, then evaluate on the test split."""
    model = S4Max(args).to(device)

    if train:
        # Build the train/val loaders (train shuffled, val not) and fit.
        loaders = {
            split: DataLoader(
                S4MaxDataset(train=split),
                batch_size=args.batch_size,
                shuffle=(split == "train"),
            )
            for split in ("train", "val")
        }
        train_model(
            model,
            torch.nn.MSELoss(),
            torch.optim.Adam(model.parameters(), lr=args.lr),
            loaders["train"],
            loaders["val"],
            num_epochs=args.epochs,
        )

    # Always evaluate on the held-out test split.
    test_loader = DataLoader(S4MaxDataset(train="test"), batch_size=args.batch_size, shuffle=False)
    test_model(model, test_loader)
    
if __name__ == "__main__":
    # Parse CLI flags; `args` is also read as a module-level global by
    # train_model/test_model.
    args = configs().parse_args()

    # One-off preprocessing helpers (uncomment to regenerate the split files
    # or to inspect the raw data):
    # split_data(args)
    # print("Data split done!")

    # visualize_all_data(args)

    # train=False: skip training and only evaluate the saved "model.pth".
    main(args, train=False)
