import numpy as np
import mne
from scipy import signal
import os
import json
import glob
# import warnings
# warnings.filterwarnings('error') 


class LoadMachineDataSet:
    """Build P300 speller datasets (target / non-target epochs) from CSV files.

    Each CSV file holds one recording session with samples as rows.  A marker
    column encodes ``flash_index * 100 + trail_index`` (see
    :meth:`TargetWordCompareMark`).  Epochs around each marker are baseline
    corrected (feature mode only), band filtered with MNE, resampled with
    SciPy, and sorted into target / non-target stacks.
    """

    # 6x6 speller matrix: trail_index 1-6 selects a row, 7-12 a column.
    targetWordMatrix = np.array(
        [
            ["a", "b", "c", "d", "e", "f"],
            ["g", "h", "i", "j", "k", "l"],
            ["m", "n", "o", "p", "q", "r"],
            ["s", "t", "u", "v", "w", "x"],
            ["y", "z", "1", "2", "3", "4"],
            ["5", "6", "7", "8", "9", "_"],
        ]
    )

    def __init__(
        self,
        path: str,
        nchannel: list[int],
        labelCol: int,
        targetWordJson: str,
        csvDelimiter: str = "\t",
        loadDataOnly: bool = False,
    ) -> None:
        """Read machine-recorded CSV data and build the epoch dataset.

        Parameters:
            path: a CSV file, or a directory scanned for ``*.csv``.
            nchannel: the three column indices of the channels to extract.
            labelCol: index of the marker (label) column in the CSV.
            targetWordJson: JSON file mapping CSV basenames to target words.
            csvDelimiter: field delimiter of the CSV files (default ``'\\t'``).
            loadDataOnly: if True, extract epochs plus raw mark values via
                :meth:`loadDataOnly`; otherwise split epochs into target /
                non-target stacks via :meth:`analyzeFeature`.
        """
        self.path = path
        self.nchannel = nchannel
        self.csvDelimiter = csvDelimiter
        self.resampleLen = 307  # samples per epoch after resampling
        self.targetWordDict: dict = self.TargetWordJsonToDict(targetWordJson)
        # Windows relative to each marker, in ms.  NOTE(review): these values
        # are used directly as sample offsets, which assumes a 1000 Hz
        # sampling rate (matching the sfreq passed to the filters) — confirm.
        self.baseline = [-200, 100]
        self.frame = [100, 800]

        if loadDataOnly:
            self.targetData, self.nonTargetData = self.loadDataOnly(
                path, nchannel, labelCol
            )
        else:
            self.targetData, self.nonTargetData = self.analyzeFeature(
                path, nchannel, labelCol
            )

    def TargetWordJsonToDict(self, path: str) -> dict:
        """Load the {csv basename: target word} mapping from a JSON file.

        Raises:
            Exception: if *path* does not exist.
        """
        if not os.path.exists(path):
            raise Exception("目标字符Json文件不存在")
        # Explicit UTF-8: JSON is UTF-8 by spec and the platform default
        # encoding may differ (e.g. on Windows).
        with open(path, "r", encoding="utf-8") as f:
            return json.load(f)

    def TargetWordCompareMark(self, mark: int, targetWord: str) -> bool:
        """Return True if the flashed row/column contains the target char.

        ``mark`` encodes ``flash_index * 100 + trail_index`` where
        flash_index (1-based) selects the character of *targetWord* currently
        being spelled, and trail_index selects the flashed row (1-6) or
        column (7-12) of the speller matrix:
            7   8   9   10  11  12
        1   A   B   C   D   E   F
        2   G   H   I   J   K   L
        3   M   N   O   P   Q   R
        4   S   T   U   V   W   X
        5   Y   Z   1   2   3   4
        6   5   6   7   8   9   _

        Raises:
            Exception: if *targetWord* is empty.
        """
        if len(targetWord) == 0:
            raise Exception("targetWord不能为空")
        flash_index = int(mark // 100)
        trail_index = int(mark % 100)
        target = targetWord[flash_index - 1]
        if trail_index <= 6:
            targetArray = LoadMachineDataSet.targetWordMatrix[trail_index - 1, :]
        else:
            targetArray = LoadMachineDataSet.targetWordMatrix[:, trail_index - 7]
        # bool() keeps the declared return type (ndarray __contains__ would
        # otherwise yield np.bool_).
        return bool(target in targetArray)

    def _collectCsvFiles(self, path: str, nchannel: list[int]) -> list[str]:
        """Resolve *path* to a list of CSV files and validate *nchannel*."""
        if os.path.isdir(path):
            files = glob.glob(os.path.join(path, "*.csv"))
        elif os.path.isfile(path):
            files = [path]
        else:
            raise Exception("path必须是文件或目录")
        if len(nchannel) != 3:
            raise Exception("nchannel的长度必须为3")
        return files

    def _baselineCorrect(self, rawSignal: np.ndarray, labelIndex: np.ndarray) -> None:
        """Subtract each event's pre-stimulus mean from its epoch, in place.

        The baseline window is ``self.baseline`` relative to the event
        (clamped at the start of the recording); the corrected span is the
        epoch window ``self.frame``.
        """
        baseLen = self.baseline[1] - self.baseline[0]
        for eventPos in labelIndex:
            startSample = eventPos + self.frame[0]
            endSample = eventPos + self.frame[1]
            beginBase = max(eventPos + int(self.baseline[0]), 0)
            # Plain integer arithmetic; int() on a size-1 ndarray
            # (np.floor(np.diff(...))) is deprecated in NumPy >= 1.25.
            endBase = beginBase + baseLen - 1
            base = np.mean(rawSignal[:, beginBase:endBase], axis=1)
            rawSignal[:, startSample:endSample] -= base[:, np.newaxis]

    def _bandFilter(self, rawSignal: np.ndarray) -> None:
        """Filter each channel in place, keeping roughly 1.5-24 Hz content.

        Implemented as (0.5-24 Hz band) minus (0.5-1.5 Hz band) at an
        assumed 1000 Hz sampling rate.
        """
        for ch in range(rawSignal.shape[0]):
            low = mne.filter.filter_data(
                rawSignal[ch, :], sfreq=1000, l_freq=0.5, h_freq=1.5, verbose=False
            )
            high = mne.filter.filter_data(
                rawSignal[ch, :], sfreq=1000, l_freq=0.5, h_freq=24, verbose=False
            )
            rawSignal[ch, :] = high - low

    def _extractEpoch(
        self, rawSignal: np.ndarray, eventPos: int, nchannel: list[int]
    ) -> np.ndarray:
        """Cut the ``self.frame`` window after *eventPos* and resample each
        channel to ``self.resampleLen`` samples."""
        tmp = np.zeros((len(nchannel), self.resampleLen))
        for ch in range(len(nchannel)):
            tmp[ch, :] = signal.resample(
                rawSignal[ch, eventPos + self.frame[0] : eventPos + self.frame[1]],
                self.resampleLen,
            )
        return tmp

    def _stackEpochs(self, epochs: list, nchannel: list[int]) -> np.ndarray:
        """Stack per-event epochs into (channels, resampleLen, num_events).

        Robustness fix: ``np.stack`` raises on an empty list, so an empty
        epoch set yields an array with a zero-length last axis instead.
        """
        if not epochs:
            return np.zeros((len(nchannel), self.resampleLen, 0))
        return np.stack(epochs, axis=2)

    def analyzeFeature(
        self, path: str, nchannel: list[int], labelCol: int
    ) -> tuple[np.ndarray, np.ndarray]:
        """Extract target / non-target epochs from the CSV data.

        Files whose basename has no entry in ``self.targetWordDict`` are
        skipped.  Each epoch is baseline corrected, band filtered and
        resampled, then classified by :meth:`TargetWordCompareMark`.

        Parameters:
            path: a CSV file or a directory containing CSV files.
            nchannel: the three channel column indices to extract.
            labelCol: index of the marker column.

        Returns:
            (targetSignal, nonTargetSignal), each shaped
            (len(nchannel), 307, Num).
        """
        files = self._collectCsvFiles(path, nchannel)

        targetSignal = []
        nonTargetSignal = []

        for file in files:
            try:
                targetWord = self.targetWordDict[os.path.basename(file)]
            except KeyError:
                # No target word registered for this recording; skip it.
                continue
            # Samples are stored row-wise; transpose to channels x samples.
            data = np.loadtxt(file, delimiter=self.csvDelimiter)
            rawSignal = data[:, nchannel].T
            # Sample indices where the marker column is non-zero.
            labelIndex = np.nonzero(data[:, labelCol])[0]

            self._baselineCorrect(rawSignal, labelIndex)
            self._bandFilter(rawSignal)

            for eventPos in labelIndex:
                epoch = self._extractEpoch(rawSignal, eventPos, nchannel)
                mark = data[eventPos, labelCol]
                if self.TargetWordCompareMark(mark, targetWord.lower()):
                    targetSignal.append(epoch)
                else:
                    nonTargetSignal.append(epoch)

        return (
            self._stackEpochs(targetSignal, nchannel),
            self._stackEpochs(nonTargetSignal, nchannel),
        )

    def loadDataOnly(
        self, path: str, nchannel: list[int], labelCol: int
    ) -> tuple[np.ndarray, np.ndarray]:
        """Extract all epochs and their raw mark values (no classification).

        Unlike :meth:`analyzeFeature`, no baseline correction is applied and
        no target-word lookup is required.

        Parameters:
            path: a CSV file or a directory containing CSV files.
            nchannel: the three channel column indices to extract.
            labelCol: index of the marker column.

        Returns:
            resultSignal: epochs shaped (len(nchannel), 307, Num).
            markNumList: 1-D array of the mark value for each epoch.
        """
        files = self._collectCsvFiles(path, nchannel)

        Signal = []
        markNumList = []

        for file in files:
            # Samples are stored row-wise; transpose to channels x samples.
            data = np.loadtxt(file, delimiter=self.csvDelimiter)
            rawSignal = data[:, nchannel].T
            labelIndex = np.nonzero(data[:, labelCol])[0]
            markNumList.append(np.take(data[:, labelCol], labelIndex))

            self._bandFilter(rawSignal)

            for eventPos in labelIndex:
                Signal.append(self._extractEpoch(rawSignal, eventPos, nchannel))

        # Bug fix: np.array() on a ragged list of per-file mark arrays raises
        # ValueError in modern NumPy; concatenate the 1-D arrays instead.
        marks = np.concatenate(markNumList) if markNumList else np.array([])
        return self._stackEpochs(Signal, nchannel), marks

    def getDataNdarray(self) -> tuple[np.ndarray, np.ndarray]:
        """Return (targetData, nonTargetData) as produced by __init__."""
        return self.targetData, self.nonTargetData

    def getDataArraySize(self) -> tuple[int, int]:
        """Return ``len()`` of each data array.

        NOTE(review): for the stacked (channels, 307, N) arrays this is the
        first-axis length (channel count), not the epoch count N — confirm
        that this is the intended meaning.
        """
        return len(self.targetData), len(self.nonTargetData)

    def getSelectChoice(self) -> list[int]:
        """Return the channel column indices selected at construction."""
        return self.nchannel

    def analyzeIntervalMarkInCSV(self, file) -> None:
        """Print the mean sample interval between consecutive flash markers.

        Reads the marker column (hard-coded to column 22), drops the first 12
        markers, groups the rest into runs of 12 flashes, and averages the
        gaps inside each run.
        """
        data = np.loadtxt(file, delimiter=self.csvDelimiter)
        indexData = np.nonzero(data[:, 22])[0][12:]
        groups = []
        current = []
        count = 0
        for idx in indexData[:-1]:
            if count % 12 == 0 and count != 0:
                groups.append(current)
                current = []
            count += 1
            current.append(idx)
        total = 0  # renamed from `sum`, which shadowed the builtin
        count = 0
        for group in groups:
            for j in range(len(group) - 2):
                total += group[j + 1] - group[j]
                count += 1
        print(total / count)  # ~232.55 samples -> flash interval ~230 ms


if __name__ == "__main__":
    # Smoke run: build the dataset from the local training CSVs and report
    # how many target / non-target epochs were extracted.
    train_dir = r"F:/DataSet/data/trainData"
    data_set = LoadMachineDataSet(
        train_dir, [4, 5, 6], 22, r"F:\DataSet\data\TargetWord.json", "\t"
    )
    target, non_target = data_set.getDataNdarray()
    print(target.shape, non_target.shape)
