import os
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from sklearn.metrics.pairwise import cosine_similarity
import warnings
from sklearn import utils

warnings.filterwarnings('ignore')

class ReadData():
    """Load a CSV time series and cut one sliding window out of it.

    Args:
        args: experiment configuration namespace (stored for downstream use).
        root_path (str): directory containing the data file.
        data_path (str): CSV file name inside ``root_path``.
        window_size (int): number of rows in one sliding window.
        window_num (int or str): index of the window to extract (default
            ``'0'``; coerced to ``int`` before use).
        size (list or None): ``[seq_len, label_len, pred_len]``; defaults to
            ``[30, 1, 10]`` when ``None``.
        features (str): ``'M'`` keeps all columns, ``'S'`` keeps only the
            ``target`` column.
        target (int or None): column index used when ``features == 'S'``.
    """

    def __init__(self, args, root_path, data_path, window_size, window_num='0', size=None, features='M', target=None):
        if size is None:
            self.seq_len = 30
            self.label_len = 1
            self.pred_len = 10
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]

        self.features = features  # task type: 'M' multivariate, 'S' single target
        self.target = target      # target column index (only used when features == 'S')
        self.root_path = root_path  # data root directory
        self.data_path = data_path  # data file name
        # Keep the original (misspelled) attribute name for backward
        # compatibility and expose a correctly spelled alias.
        self.windeow_size = window_size  # sliding window size
        self.window_size = window_size
        self.window_num = window_num  # sliding window index
        self.args = args
        self.read_data()

    def read_data(self):
        """Read the CSV, slice out window ``window_num`` and fill NaNs.

        Returns:
            np.ndarray: the selected window, shape [rows, channels].
        """
        data_file = os.path.join(self.root_path, self.data_path)
        data = pd.read_csv(data_file, header=0, index_col=None, engine='python').values
        if self.features == 'S':
            data = data[:, self.target, np.newaxis]

        # BUG FIX: window_num defaults to the *string* '0'; multiplying an int
        # by a str performs string repetition and the subsequent addition
        # raises TypeError — coerce to int before arithmetic.
        window_num = int(self.window_num)
        start_index = self.windeow_size * window_num
        end_index = start_index + self.windeow_size

        # Overlap with the previous window so the first sample of this window
        # still has a full encoder/decoder context.
        if start_index != 0:
            start_index = start_index - self.seq_len - self.pred_len + 1

        data_window = data[start_index: end_index]

        # Forward- then backward-fill NaNs. ``fillna(method=...)`` is
        # deprecated (removed in pandas 2.x) — use ffill()/bfill(); the old
        # ``limit=len(df)`` was effectively unlimited, so this is equivalent.
        df = pd.DataFrame(data_window)
        data_window = df.ffill().bfill().values
        self.data_window = data_window

        return self.data_window

    def slice_data(self):
        """Cut ``data_window`` into (encoder, decoder) sample pairs.

        Returns:
            tuple: ``X`` with shape [length, seq_len, channels] and
            ``Y`` with shape [length, label_len + pred_len, channels].
        """
        length = len(self.data_window) - self.seq_len - self.pred_len + 1
        X = []
        Y = []

        for index in range(length):
            s_begin = index
            s_end = s_begin + self.seq_len
            r_begin = s_end - self.label_len
            r_end = r_begin + self.label_len + self.pred_len

            seq_x = self.data_window[s_begin: s_end]  # encoder input
            seq_y = self.data_window[r_begin: r_end]  # decoder input

            X.append(seq_x)
            Y.append(seq_y)
        X = np.array(X)  # [length, seq_len, channels]
        Y = np.array(Y)  # [length, label_len+pred_len, channels]
        return X, Y
    

class Dataset_Spectrum(Dataset):
    """Torch dataset wrapping pre-sliced encoder/decoder sample arrays.

    Args:
        args: configuration namespace; reads ``use_gpu`` and ``features`` here,
            and the fine-tuning fields (``fine_tuning_batch``, ``seq_len``,
            ``label_len``, ``pred_len``, ``c_out``, ``similar_threshold``)
            in the fine-tuning methods below.
        X (numpy): [length, seq_len, channels] encoder inputs.
        Y (numpy): [length, label_len+pred_len, channels] decoder inputs.
    """

    def __init__(self, args, X, Y):
        super(Dataset_Spectrum, self).__init__()
        self.args = args
        self.use_gpu = args.use_gpu
        self.features = args.features
        self.__read_data__(X, Y)

    def __read_data__(self, X, Y):
        # Keep the full sample tensors in memory.
        self.X = torch.tensor(X)
        self.Y = torch.tensor(Y)

    def __getitem__(self, index):
        """Return one (encoder, decoder) sample plus placeholder time marks."""
        seq_x = self.X[index]  # encoder input  [seq_len, channels]
        seq_y = self.Y[index]  # decoder input
        seq_x_mark = torch.zeros((seq_x.shape[0], 1))  # encoder time mark (zero placeholder)
        seq_y_mark = torch.zeros((seq_y.shape[0], 1))  # decoder time mark (zero placeholder)
        return seq_x, seq_y, seq_x_mark, seq_y_mark, index

    def __len__(self):
        """Number of samples after slicing (the valid range of ``index``)."""
        return len(self.X)

    def fine_tuning(self, index):
        """Batch the ``lambda_T`` samples preceding ``index`` for fine-tuning.

        NOTE(review): ``self.lambda_T`` is never assigned in this class —
        presumably set externally before this is called; confirm.
        """
        start_index = max(0, index - self.lambda_T)
        seq_x = self.X[start_index : index]  # [lambda_T, seq_len, channels]
        seq_y = self.Y[start_index : index]  # [lambda_T, label_len+pred_len, channels]
        seq_x_mark = torch.zeros((seq_x.shape[1], 1))
        seq_y_mark = torch.zeros((seq_y.shape[1], 1))
        x_set, y_set = utils.shuffle(seq_x, seq_y)
        # NOTE(review): this reshape silently requires len(x_set) to be a
        # multiple of fine_tuning_batch, unlike fine_tuning_similar below
        # which pads the last batch — confirm the asymmetry is intended.
        x_set = torch.tensor(x_set).reshape(-1, self.args.fine_tuning_batch, self.args.seq_len, self.args.c_out)  # [nums, batch, seq_len, channels]
        y_set = torch.tensor(y_set).reshape(-1, self.args.fine_tuning_batch,
                                            self.args.label_len + self.args.pred_len, self.args.c_out)  # [nums, batch, label_len+pred_len, channels]

        return x_set, y_set, seq_x_mark, seq_y_mark

    def fine_tuning_similar(self, index):
        """Like :meth:`fine_tuning`, but keeps only the history samples whose
        z-normalized shape is cosine-similar to the current sample."""
        start_index = max(0, index - self.lambda_T)
        x_index = self.X[index]  # current sample [seq_len, channels]
        x_index = x_index.reshape(1, self.args.seq_len, self.args.c_out)

        seq_x = self.X[start_index : index]  # [lambda_T, seq_len, channels]
        seq_y = self.Y[start_index : index]  # [lambda_T, label_len+pred_len, channels]

        # Per-sample z-normalization so similarity compares shape, not scale.
        means_seq_x = torch.mean(seq_x, dim=1, keepdim=True)   # [batch, 1, num_channels]
        stdev_seq_x = torch.std(seq_x, dim=1, keepdim=True) + 1e-6  # [batch, 1, num_channels]
        seq_x = (seq_x - means_seq_x) / stdev_seq_x  # normalized x [batch, seq_len, num_channels]

        means_x_index = torch.mean(x_index, dim=1, keepdim=True)
        stdev_x_index = torch.std(x_index, dim=1, keepdim=True) + 1e-6
        x_index = (x_index - means_x_index) / stdev_x_index

        similarities = cosine_similarity(x_index.reshape(1, -1), seq_x.reshape(seq_x.shape[0], -1))
        similar_indices = np.where(similarities[0] > self.args.similar_threshold)[0]

        # Undo the normalization before returning the raw samples.
        seq_x = seq_x * stdev_seq_x + means_seq_x
        x_index = x_index * stdev_x_index + means_x_index

        seq_x = seq_x[similar_indices]  # [n_similar, seq_len, channels]
        seq_y = seq_y[similar_indices]  # [n_similar, label_len+pred_len, channels]
        seq_x_mark = torch.zeros((seq_x.shape[1], 1))
        seq_y_mark = torch.zeros((seq_y.shape[1], 1))
        x_set, y_set = utils.shuffle(seq_x, seq_y)

        # Split into batches; the trailing partial batch is kept, not dropped.
        # BUG FIX: torch.split returns an immutable tuple, so the padding
        # assignment below raised TypeError — convert to list first.
        x_batches = list(torch.split(x_set, self.args.fine_tuning_batch))  # [nums, batch, seq_len, channels]
        y_batches = list(torch.split(y_set, self.args.fine_tuning_batch))  # [nums, batch, label_len+pred_len, channels]

        # Zero-pad the last batch up to a full batch size.
        if len(x_batches[-1]) < self.args.fine_tuning_batch:
            x_batches[-1] = torch.cat([x_batches[-1], torch.zeros(self.args.fine_tuning_batch - len(x_batches[-1]), *x_batches[-1].shape[1:])], dim=0)
            y_batches[-1] = torch.cat([y_batches[-1], torch.zeros(self.args.fine_tuning_batch - len(y_batches[-1]), *y_batches[-1].shape[1:])], dim=0)

        # Merge the per-batch tensors into a single stacked tensor.
        x_batches = torch.stack(x_batches)  # [nums, batch, seq_len, channels]
        y_batches = torch.stack(y_batches)  # [nums, batch, label_len+pred_len, channels]

        return x_batches, y_batches, seq_x_mark, seq_y_mark


def similarity_decision(X, centers):
    """Assign every sample in ``X`` to the index of its nearest center.

    Args:
        X (np.ndarray): [length, seq_len, channels]; flattened per sample.
        centers (np.ndarray): [n_clusters, seq_len*channels] cluster centers.

    Returns:
        np.ndarray: [length] index of the closest (Euclidean) center.
    """
    flattened = X.reshape((X.shape[0], -1))  # [length, seq_len*channels]
    # Pairwise differences broadcast to [length, n_clusters, dim],
    # then reduced to Euclidean distances [length, n_clusters].
    diffs = flattened[:, np.newaxis] - centers
    distances = np.linalg.norm(diffs, axis=2)
    # Index of the nearest center for each sample.
    return np.argmin(distances, axis=1)



# Registry mapping the ``args.data`` key to its Dataset class.
data_dict = {
    'spectrum': Dataset_Spectrum,
}

def train_provider(args, centers, labels, distance):
    """Build one Dataset and DataLoader per cluster for the training split.

    Samples are grouped by the externally supplied ``labels``; the returned
    lists are index-aligned with ``centers``.
    """
    centers = np.array(centers)
    labels = np.array(labels)
    distance = np.array(distance)  # NOTE(review): converted but never used below

    Data = data_dict[args.data]
    timeenc = 0 if args.embed != 'timeF' else 1
    # NOTE(review): the ReadData defined in this file takes
    # (args, root_path, data_path, window_size, ...) and does not accept
    # flag/scale/timeenc/freq — this call looks written against a different
    # ReadData version; confirm which implementation is intended.
    Datas = ReadData(args, args.root_path, args.data_path, size=[args.seq_len, args.label_len, args.pred_len],
                     flag='train', scale=True, features=args.features, target=args.target, timeenc=timeenc, freq=args.freq)
    # NOTE(review): the slice_data() defined above returns only (X, Y);
    # unpacking four values would raise — verify against the intended version.
    X, Y, scaler, scaler_MS = Datas.slice_data()  # X:[length, seq_len, channels]

    n_clusters = len(centers)
    X_clusters = []  # [n_clusters, num, seq_len, channnels]
    Y_clusters = []
    data_set_clusters = []  # [n_clusters]
    data_loader_clusters = []  # [n_clusters]
    for i in range(n_clusters):
        X_cluster = X[labels == i]
        Y_cluster = Y[labels == i]
        X_clusters.append(X_cluster)
        Y_clusters.append(Y_cluster)
        # NOTE(review): Dataset_Spectrum.__init__ takes (args, X, Y) only;
        # the extra scaler arguments also look like a version mismatch.
        data_set = Data(args, X_cluster, Y_cluster, scaler, scaler_MS)
        data_set_clusters.append(data_set)
        print(f'train_cluster{i}:', len(data_set))
    
        data_loader = DataLoader(
            data_set,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=args.num_workers,
            drop_last=False)
        
        data_loader_clusters.append(data_loader)
    return data_set_clusters, data_loader_clusters


def val_provider(args, centers, labels, distance):
    """Build one Dataset and DataLoader per cluster for the validation split.

    Unlike ``train_provider``, cluster membership is recomputed from
    ``centers`` via ``similarity_decision`` (the incoming ``labels`` are
    overwritten), and clusters with no samples yield ``None`` entries.
    """
    centers = np.array(centers)
    labels = np.array(labels)
    distance = np.array(distance)  # NOTE(review): converted but never used below

    Data = data_dict[args.data]
    timeenc = 0 if args.embed != 'timeF' else 1
    # NOTE(review): same ReadData signature mismatch as in train_provider —
    # the class above does not accept flag/scale/timeenc/freq; confirm.
    Datas = ReadData(args, args.root_path, args.data_path, size=[args.seq_len, args.label_len, args.pred_len],
                     flag='val', scale=True, features=args.features, target=args.target, timeenc=timeenc, freq=args.freq)
    # NOTE(review): slice_data() above returns only (X, Y) — verify.
    X, Y, scaler, scaler_MS = Datas.slice_data()  # X:[length, seq_len, channels]

    n_clusters = len(centers)
    X_clusters = []  # [n_clusters, num, seq_len, channnels]
    Y_clusters = []
    data_set_clusters = []  # [n_clusters]
    data_loader_clusters = []  # [n_clusters]
    labels = similarity_decision(X, centers)

    for i in range(n_clusters):
        X_cluster = X[labels == i]
        Y_cluster = Y[labels == i]
        if len(X_cluster) > 0:  # only build a DataLoader when the cluster is non-empty
            X_clusters.append(X_cluster)
            Y_clusters.append(Y_cluster)
            # NOTE(review): extra scaler arguments — see train_provider note.
            data_set = Data(args, X_cluster, Y_cluster, scaler, scaler_MS)
            print(f'val_cluster{i}:', len(data_set))
            data_loader = DataLoader(
                data_set,
                batch_size=args.batch_size,
                shuffle=True,
                num_workers=args.num_workers,
                drop_last=True
            )
            data_set_clusters.append(data_set)
            data_loader_clusters.append(data_loader)
        else:
            # Keep the lists index-aligned with centers even for empty clusters.
            X_clusters.append(None)
            Y_clusters.append(None)
            data_set_clusters.append(None)
            data_loader_clusters.append(None)
            print(f'Cluster {i} has no data')
    return data_set_clusters, data_loader_clusters


def test_provider(args):
    """Build a single un-clustered Dataset/DataLoader for the test split.

    Uses batch_size=1 without shuffling so predictions stay in time order.
    """
    Data = data_dict[args.data]
    timeenc = 0 if args.embed != 'timeF' else 1
    # NOTE(review): same ReadData signature mismatch as in train_provider —
    # the class above does not accept flag/scale/timeenc/freq; confirm.
    Datas = ReadData(args, args.root_path, args.data_path, size=[args.seq_len, args.label_len, args.pred_len],
                     flag='test', scale=True, features=args.features, target=args.target, timeenc=timeenc, freq=args.freq)
    # NOTE(review): slice_data() above returns only (X, Y) — verify.
    X, Y, scaler, scaler_MS = Datas.slice_data()  # X:[length, seq_len, channels]

    # NOTE(review): extra scaler arguments — see train_provider note.
    data_set = Data(args, X, Y, scaler, scaler_MS)
    print('test_datas:', len(data_set))
    
    data_loader = DataLoader(
        data_set,
        batch_size=1,
        shuffle=False,
        num_workers=args.num_workers,
        drop_last=False)
        
    return data_set, data_loader