from scipy.io import arff
import numpy as np
import torch
import pandas as pd


class ArffDataProcessor:
    """Load and preprocess a multivariate time-series dataset from an ARFF file.

    The ARFF file is expected to contain two fields: the first holds the
    multidimensional series data, the second holds the class label.

    Example:
        filePath = 'Data/SpokenArabicDigits/SpokenArabicDigits_TRAIN.arff'
        processor = ArffDataProcessor(filePath)
        samples, labels, paddingMask = processor.getAllData(100, 0.0)
    """

    def __init__(self, filePath, DataDistribution=None):
        """
        Initialize the processor: load the ARFF file and optionally
        standardize / normalize the samples.

        :param filePath: path to the ARFF file.
        :param DataDistribution: normalization mode forwarded to
            ``Nomorlizer`` ('StandardizeEach', 'NormalizeEach',
            'StandardizeAll', 'NormalizeAll'); None leaves the data raw.
        """
        self.filePath = filePath
        self.samplesArray, self.labelsArray, self.categoryToLabel, self.labelToCategory = self.getDataFromArff()
        print(self.samplesArray.shape)  # debug: raw data shape
        # BUG FIX: the original call hard-coded DataDistribution=None,
        # silently discarding the constructor argument.
        self.samplesArray = self.Nomorlizer(self.samplesArray, DataDistribution)

    def Nomorlizer(self, Array, DataDistribution):
        """Standardize or normalize the data, ignoring NaN entries.

        :param Array: numpy array shaped [n_samples, n_dims, length].
        :param DataDistribution: processing mode:
            'StandardizeEach' -> zero-mean / unit-std per sample dimension,
            'NormalizeEach'   -> min-max scale to [0, 1] per sample dimension,
            'StandardizeAll'  -> zero-mean / unit-std over the whole dataset,
            'NormalizeAll'    -> min-max scale to [0, 1] over the whole dataset.
            Any other value (including None) returns an unmodified copy.
        :return: processed array; NaN positions of the input stay NaN.
        """
        normalized_data = np.copy(Array)
        if DataDistribution == 'StandardizeEach':
            # Per-sample, per-dimension statistics along the time axis,
            # vectorized over the whole batch instead of a Python loop.
            # NaN inputs propagate to NaN outputs automatically.
            mean = np.nanmean(Array, axis=2, keepdims=True)
            std = np.nanstd(Array, axis=2, keepdims=True)
            std[std == 0] = 1  # avoid division by zero
            normalized_data = (Array - mean) / std
        elif DataDistribution == 'NormalizeEach':
            min_val = np.nanmin(Array, axis=2, keepdims=True)
            max_val = np.nanmax(Array, axis=2, keepdims=True)
            range_val = max_val - min_val
            range_val[range_val == 0] = 1  # avoid division by zero
            normalized_data = (Array - min_val) / range_val
        elif DataDistribution == 'NormalizeAll':
            # Global min / max over the entire dataset (NaN ignored).
            min_val = np.nanmin(Array)
            max_val = np.nanmax(Array)
            range_val = max_val - min_val
            if range_val == 0:
                range_val = 1  # avoid division by zero
            normalized_data = (Array - min_val) / range_val
        elif DataDistribution == 'StandardizeAll':
            # Global mean / std over the entire dataset (NaN ignored).
            mean = np.nanmean(Array)
            std = np.nanstd(Array)
            if std == 0:
                std = 1  # avoid division by zero
            normalized_data = (Array - mean) / std
        return normalized_data

    def getAllData(self, maxLength, paddingValue):
        """
        Return the padded samples, labels, and padding mask.

        :param maxLength: sequence length every sample is padded to.
        :param paddingValue: value written into the padded positions.
        :return: (paddedSamplesTensor [batch, maxLength, dim],
                  labelsTensor [batch],
                  paddingMaskTensor [batch, maxLength], True on valid steps).
        """
        self.paddedSamplesTensor, self.paddingMaskTensor = self.padData(self.samplesArray, maxLength, paddingValue)
        self.labelsTensor = torch.tensor(self.labelsArray, dtype=torch.long)
        return self.paddedSamplesTensor, self.labelsTensor, self.paddingMaskTensor

    def getDataFromArff(self):
        """
        Load the ARFF file and convert its contents to numpy arrays.

        ARFF layout requirement: the first field is the multidimensional
        series data, the second field is the category.

        :return: samplesArray: numpy array with one entry per kept sample,
                 categoriesArray: numpy array of integer labels (-1 for
                     samples whose category was missing),
                 categoryToInt: dict mapping category name -> integer label,
                 intToCategory: dict mapping integer label -> category name.
        """
        # Load the ARFF file.
        data, meta = arff.loadarff(self.filePath)
        # Extract the multidimensional data and the category values.
        samplesData = data[data.dtype.names[0]]
        categories = data[data.dtype.names[1]]
        # Keep only samples that contain no missing ('?') values.
        samplesArray = []
        for sample in samplesData:
            sampleList = sample.tolist()
            if '?' not in sampleList:
                samplesArray.append(sampleList)
        samplesArray = np.array(samplesArray)
        # Decode byte-string categories; missing ('?') becomes NaN.
        processedCategories = [val.decode('utf-8') if val != b'?' else np.nan for val in categories]
        categoriesArray = np.array(processedCategories)
        # Build the category <-> integer mappings from the observed classes.
        uniqueCategories = np.unique(categoriesArray[~pd.isnull(categoriesArray)])
        categoryToInt = {key: value for value, key in enumerate(uniqueCategories)}
        intToCategory = {value: key for key, value in categoryToInt.items()}
        # Map every category to its integer label; unknown/missing -> -1.
        categoriesArray = np.array([categoryToInt.get(category, -1)
                                    for category in categoriesArray])
        # Number of dimensions per sample (1 for univariate data).
        self.sampleDim = samplesArray.shape[1] if len(samplesArray.shape) > 1 else 1

        return samplesArray, categoriesArray, categoryToInt, intToCategory

    def padData(self, samplesArray, maxLength, paddingValue):
        """
        Pad every sample dimension to maxLength and build a validity mask.

        NOTE(review): the valid length is the count of non-NaN entries per
        dimension, which assumes NaNs only appear as trailing padding —
        confirm this holds for every dataset fed in.

        :param samplesArray: array shaped [batch, dim, length] as returned
            by getDataFromArff.
        :param maxLength: target sequence length; longer sequences are
            truncated (the original code crashed in that case).
        :param paddingValue: fill value for padded positions.
        :return: paddedSamplesTensor: float tensor [batch, maxLength, dim];
                 paddingMaskTensor: bool tensor [batch, maxLength], True
                 where at least one dimension holds valid data.
        """
        # BUG FIX: force a float buffer; np.full would otherwise inherit
        # an integer dtype from an integer paddingValue and truncate the data.
        paddedSamples = np.full((samplesArray.shape[0], samplesArray.shape[1], maxLength),
                                fill_value=paddingValue, dtype=np.float64)
        paddingMask = np.zeros((samplesArray.shape[0], samplesArray.shape[1], maxLength), dtype=bool)
        # Copy the valid prefix of every dimension of every sample.
        for i, sample in enumerate(samplesArray):
            for j, dimension in enumerate(sample):
                validLength = min(int(np.count_nonzero(~np.isnan(dimension))), maxLength)
                paddedSamples[i, j, :validLength] = dimension[:validLength]
                paddingMask[i, j, :validLength] = True
        # Convert to PyTorch tensors.
        paddedSamplesTensor = torch.tensor(paddedSamples, dtype=torch.float32)
        paddingMaskTensor = torch.tensor(paddingMask, dtype=torch.bool)
        # Transpose to [batch, length, dim].
        paddedSamplesTensor = paddedSamplesTensor.transpose(1, 2)
        # Collapse the dim axis: a time step is valid if any dimension is.
        paddingMaskTensor = paddingMaskTensor.any(dim=1)
        return paddedSamplesTensor, paddingMaskTensor

    def getSampleDim(self):
        """
        Return the number of dimensions per sample.
        """
        return self.sampleDim

    def getNumClasses(self):
        """
        Return the number of distinct classes.
        """
        return len(self.categoryToLabel)
    
    
# filePath = 'Data\SpokenArabicDigits\SpokenArabicDigits_TRAIN.arff'
# processor = ArffDataProcessor(filePath)
# samples, labels, paddingMask = processor.getAllData(100, 0.0)       
# print(samples.shape, labels.shape, paddingMask.shape)
# print(processor.getNumClasses(),processor.getSampleDim())