import mne
import numpy as np
import os
import glob
import h5py
import warnings

from google.protobuf.struct_pb2 import NULL_VALUE
from sklearn.preprocessing import StandardScaler
from mne.preprocessing import ICA
import datetime  # 用于获取当前时间，以便在日志中加上时间戳
import matplotlib
import matplotlib.pyplot as plt
from collections import Counter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as tk
from tkinter import ttk
import collections
from scipy import stats
from tqdm import tqdm  # 导入tqdm模块，用于显示进度条
# Silence MNE warnings about mismatched per-channel filter settings in the
# EDF headers; these fire on every MASS recording read and would flood stdout.
warnings.filterwarnings("ignore", message="Channels contain different highpass filters")
warnings.filterwarnings("ignore", message="Channels contain different lowpass filters")
warnings.filterwarnings("ignore", message="Highpass cutoff frequency.*is greater than lowpass cutoff frequency")


def log_message(message):
    """Append a timestamped line to the preprocessing log file.

    Args:
        message: Text to record; a 'YYYY-mm-dd HH:MM:SS - ' prefix is added.
    """
    log_file_path = os.path.join("F:\\EEGDataset\\MASS\\data_anno_savefiles", '数据预处理日志.txt')
    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Open in append mode with an explicit encoding: the log contains Chinese
    # text, and the platform default (e.g. GBK on Chinese Windows) is not
    # portable — without this the file can become unreadable elsewhere.
    with open(log_file_path, 'a', encoding='utf-8') as log_file:
        log_file.write(f"{now} - {message}\n")

class DataPreprossing:
    """Preprocess MASS PSG EDF recordings into segmented HDF5 files.

    Pipeline: discover EDF/annotation file pairs, load the raw data (caching
    a fast-loading .fif copy next to the EDF), map sleep-stage annotations to
    integer labels, slice the selected EEG channels into fixed-length epochs,
    and store data + labels per recording in one HDF5 file.
    """

    def __init__(self):
        print("DataPreprossing __init__")
        # NOTE(review): paths are hard-coded for one Windows machine.
        self.data_files_path = "F:\\EEGDataset\\MASS\\eegdata"
        self.anno_files_path = "F:\\EEGDataset\\MASS\\annotation"
        self.save_files_path = "F:\\EEGDataset\\MASS\\data_anno_savefiles"
        self.files_counts = 0        # total number of PSG recordings found
        self.data_files_names = []   # full paths of '*PSG.edf' files
        self.base_files_names = []   # basename stems shared by data/anno/save files
        self.anno_files_names = []   # matching '* Annotations.edf' paths
        self.save_files_names = []   # output '.h5' paths

        # Use None as the "not loaded yet" sentinel. The original code used
        # protobuf's NULL_VALUE enum (which is just the int 0) — a spurious
        # dependency and a misleading sentinel.
        self.raw_data = None
        self.raw_annotations = None
        self.data_durations = None

        self.second_time = 30          # epoch length in seconds
        self.sampling_frequency = 256  # target sampling rate in Hz
        self.labels = []               # per-epoch stage labels
        self.label_length = 0
        self.data = {}                 # channel name -> ndarray of samples
        self.data_length = 0
        self.desired_channel_names = ['EEG C3', 'EEG C4', 'EEG F3', 'EEG F4']
        self.channel_names = []        # actual channel names matched per file

    def save_files(self, file_index):
        """Write the segmented data and labels for one recording to HDF5.

        Layout: group 'data' holds one dataset '<channel>_data' per channel;
        group 'labels' holds the dataset 'stage_labels'.
        """
        with h5py.File(self.save_files_names[file_index], 'w') as f:
            eeg_group = f.create_group('data')
            for channel in self.channel_names:
                eeg_group.create_dataset(f'{channel}_data', data=self.data[channel])

            labels_group = f.create_group('labels')
            labels_group.create_dataset('stage_labels', data=self.labels)

    def get_files_names(self):
        """Discover EDF/annotation pairs and derive the output paths.

        Raises:
            FileNotFoundError: if a PSG file has no matching annotation file
                (previously this surfaced as a cryptic IndexError from `[0]`).
        """
        self.data_files_names = glob.glob(os.path.join(self.data_files_path, '*PSG.edf'))

        # Strip the trailing ' PSG.edf' (8 characters) to get the shared stem.
        self.base_files_names = [os.path.basename(name)[:-8] for name in self.data_files_names]

        self.anno_files_names = []
        for base_name in self.base_files_names:
            matches = glob.glob(os.path.join(self.anno_files_path, f"{base_name} Annotations.edf"))
            if not matches:
                raise FileNotFoundError(f"No annotation file found for '{base_name}'")
            self.anno_files_names.append(matches[0])

        self.save_files_names = [os.path.join(self.save_files_path, f"{base_name}.h5") for base_name in self.base_files_names]
        self.files_counts = len(self.data_files_names)

    def get_data_labels(self):
        """Convert annotations to stage labels and slice channels into epochs.

        Fills self.labels with integer stage codes and self.data with one
        (n_epochs, samples_per_epoch) array per channel.

        Raises:
            ValueError: if the concatenated signal length is not an exact
                multiple of the epoch length (previously printed and blocked
                on input(), which hangs an unattended run).
        """
        self.data = {}
        self.labels = []
        stages_mapping = {
            'Sleep stage W': 0,
            'Sleep stage 1': 1,
            'Sleep stage 2': 2,
            'Sleep stage 3': 3,
            'Sleep stage 4': 3,  # stages 3 and 4 merged into one deep-sleep class
            'Sleep stage R': 4,
        }

        # Pull each channel's full signal ONCE up front. The original called
        # raw_data.get_data(picks=channel) for every annotation x channel,
        # re-extracting the whole recording thousands of times.
        full_signals = {
            channel: self.raw_data.get_data(picks=channel)[0]
            for channel in self.channel_names
        }
        # Collect slices and concatenate once at the end — repeated
        # np.concatenate grows quadratically with the number of epochs.
        segments = {channel: [] for channel in self.channel_names}

        total = len(self.raw_annotations.onset)
        for onset, duration, description in tqdm(
                zip(self.raw_annotations.onset, self.raw_annotations.duration, self.raw_annotations.description),
                total=total,
                desc="Processing"
        ):
            label_value = stages_mapping.get(description, -1)
            if label_value == -1:
                continue  # skip annotations that are not sleep stages
            self.labels.append(label_value)

            start_sample = int(onset * self.sampling_frequency)
            end_sample = int((onset + duration) * self.sampling_frequency)
            for channel in self.channel_names:
                segments[channel].append(full_signals[channel][start_sample:end_sample])

        for channel in self.channel_names:
            self.data[channel] = np.concatenate(segments[channel], axis=0)

        self.label_length = len(self.labels)
        self.data_length = self.data[self.channel_names[0]].shape[0]

        # Reshape each channel into (n_epochs, samples_per_epoch).
        segment_length = int(self.second_time * self.sampling_frequency)
        segment_numbers, remainder = divmod(self.data_length, segment_length)
        if remainder:
            raise ValueError(
                f"Data length {self.data_length} is not a multiple of "
                f"segment length {segment_length}"
            )
        for channel in self.channel_names:
            self.data[channel] = self.data[channel].reshape(segment_numbers, segment_length)

        print(f"原数据长度：{self.data_length}")
        print(f"标签长度：{self.label_length}")
        print(f"数据形状 {self.data[self.channel_names[0]].shape}")
        log_message(f"数据长度：{self.data_length}")
        log_message(f"标签长度：{self.label_length}")
        log_message(f"数据形状 {self.data[self.channel_names[0]].shape}")

    def data_preprocessing(self):
        """Top-level driver: process every discovered recording not yet saved."""
        self.get_files_names()
        for file in range(self.files_counts):

            if os.path.exists(self.save_files_names[file]):
                print(f"文件 {self.save_files_names[file]} 存在")
            else:
                print(f"文件 {self.save_files_names[file]} 不存在 开始处理文件")
                # Cache the slow EDF parse as a .fif next to the source EDF.
                # os.path.splitext handles both '/' and '\\' separators; the
                # original split('/') silently misbehaved on Windows paths.
                fif_file_name = f"{os.path.splitext(self.data_files_names[file])[0]}_raw.fif"
                if not os.path.exists(fif_file_name):
                    print(f"{fif_file_name} does not exist")
                    self.raw_data = mne.io.read_raw_edf(self.data_files_names[file], preload=True, verbose=True)
                    self.raw_data.save(fif_file_name, overwrite=True)
                else:
                    print(f"{fif_file_name} exists")
                    self.raw_data = mne.io.read_raw_fif(fif_file_name, preload=True)

                self.channel_names = []
                # Substring match: the recording's channel labels may carry
                # extra suffixes beyond the desired base names.
                for ch_name in self.raw_data.info['ch_names']:
                    for desired in self.desired_channel_names:
                        if desired in ch_name:
                            self.channel_names.append(ch_name)
                print("Selected channels:", self.channel_names)

                log_message(f"正在处理第{file + 1 }个文件: {self.data_files_names[file]}")

                self.raw_annotations = mne.read_annotations(self.anno_files_names[file])
                self.data_durations = self.raw_data.times[-1]
                # Resample to the rate assumed by the sample-index arithmetic
                # in get_data_labels (was a hard-coded 256 that could drift
                # out of sync with self.sampling_frequency).
                self.raw_data.resample(self.sampling_frequency)
                log_message(f"sampling_frequency : {self.sampling_frequency}")
                log_message(f"data_durations : {self.data_durations}")

                self.get_data_labels()
                self.save_files(file)

                log_message(f"\r\n")

