import mne
import numpy as np
import os
import glob
import h5py
import warnings

from google.protobuf.struct_pb2 import NULL_VALUE
from sklearn.preprocessing import StandardScaler
from mne.preprocessing import ICA
import datetime  # 用于获取当前时间，以便在日志中加上时间戳
import matplotlib
import matplotlib.pyplot as plt
from collections import Counter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as tk
from tkinter import ttk
import collections
from scipy import stats

warnings.filterwarnings("ignore", message="Channels contain different highpass filters")
warnings.filterwarnings("ignore", message="Channels contain different lowpass filters")
warnings.filterwarnings("ignore", message="Highpass cutoff frequency.*is greater than lowpass cutoff frequency")


def log_message(message):
    """Append a timestamped line to the preprocessing log file.

    Args:
        message: Text to record; a "YYYY-MM-DD HH:MM:SS - " prefix is added.
    """
    log_file_path = os.path.join("G:\\Research\\EEG_Project\\data\\data_anno_savefiles", '数据预处理日志.txt')
    now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # BUG FIX: encoding is pinned to UTF-8 — the log contains Chinese text, and
    # the locale-dependent default codec (e.g. cp1252) would raise
    # UnicodeEncodeError on non-CJK systems.
    with open(log_file_path, 'a', encoding='utf-8') as log_file:  # append mode
        log_file.write(f"{now} - {message}\n")

class DataPreprossing:
    """Preprocess paired PSG/hypnogram EDF recordings into epoch-level HDF5 files.

    Pipeline per recording (see data_preprocessing):
      1. load raw EEG and its sleep-stage annotations,
      2. expand annotations to one integer label per raw sample,
      3. drop unlabeled samples, reshape into 30 s epochs,
      4. trim leading/trailing Wake to +/- 30 minutes around the sleep period,
      5. write epoched data and per-epoch labels to an .h5 file.

    NOTE(review): class name keeps the original "Preprossing" spelling so
    existing callers are unaffected.
    """

    def __init__(self):
        # Input/output directories (Windows-style absolute paths).
        self.data_files_path = "G:\\Research\\EEG_Project\\data\\eegdata"
        self.anno_files_path = "G:\\Research\\EEG_Project\\data\\annotation"
        self.save_files_path = "G:\\Research\\EEG_Project\\data\\data_anno_savefiles"
        self.files_counts = 0        # total number of PSG recordings found
        self.data_files_names = []   # full paths of *PSG.edf recordings
        self.base_files_names = []   # per-recording basename prefixes
        self.anno_files_names = []   # matching *-Hypnogram.edf annotation paths
        self.save_files_names = []   # output .h5 paths

        # Per-recording state, populated in data_preprocessing().
        # BUG FIX: these were initialised with protobuf NULL_VALUE — an int
        # enum member (value 0), not a null sentinel. Use None.
        self.raw_data = None
        self.raw_annotations = None
        self.data_durations = None

        self.sampling_frequency = 0
        self.labels = []             # per-sample, later per-epoch stage labels
        self.label_length = 0
        self.data = {}               # channel name -> sample array / epoch matrix
        self.data_length = 0
        self.channel_names = ['EEG Fpz-Cz', 'EEG Pz-Oz']
        self.segment_numbers = 0     # number of 30 s epochs (float until cast)

    def save_files(self, file_index):
        """Write one recording's epoched data and labels to HDF5.

        Layout: /data/<channel>_data (one dataset per channel) and
        /labels/stage_labels (one label per 30 s epoch).
        """
        with h5py.File(self.save_files_names[file_index], 'w') as f:
            eeg_group = f.create_group('data')
            for channel in self.channel_names:
                eeg_group.create_dataset(f'{channel}_data', data=self.data[channel])

            labels_group = f.create_group('labels')
            labels_group.create_dataset('stage_labels', data=self.labels)

    def processing_data_labels(self):
        """Epoch labels and data into 30 s segments and trim long Wake runs.

        Drops samples labeled -1 (unknown stage), reduces each epoch to its
        mode label, then keeps only the last 30 minutes of the initial Wake
        run and the first 30 minutes of the final Wake run.
        """
        self.labels = np.array(self.labels)

        # Keep labels aligned with the raw data length.
        self.labels = self.labels[:self.data_length]
        log_message(f"跟原数据同步后的标签长度：{len(self.labels)}")

        # Remove samples whose annotation did not map to a known stage (-1).
        valid_label_indices = self.labels != -1
        self.labels = self.labels[valid_label_indices]
        log_message(f"去除伪标签后的标签长度：{len(self.labels)}")

        # BUG FIX: the epoch width was hard-coded to 3000 samples below, which
        # silently assumed a 100 Hz sampling rate; derive it from sfreq so the
        # reshape stays consistent with segment_numbers.
        samples_per_epoch = int(30 * self.sampling_frequency)
        self.segment_numbers = len(self.labels) / samples_per_epoch

        if not self.segment_numbers.is_integer():
            print("segment_numbers 是非整数")
            input()  # NOTE(review): interactive pause kept from original code

        n_epochs = int(self.segment_numbers)
        reshaped_labels = self.labels[:n_epochs * samples_per_epoch].reshape(n_epochs, samples_per_epoch)
        # One label per epoch: the most frequent sample label within it.
        mode_labels = stats.mode(reshaped_labels, axis=1).mode.flatten()

        # Keep the last 30 min of the opening Wake run and the first 30 min of
        # the closing Wake run (30 min = 60 epochs of 30 s).
        zero_indices = np.where(mode_labels == 0)[0]
        if zero_indices.size:
            gaps = np.diff(zero_indices) > 1
            first_zero_end = zero_indices[np.where(gaps)[0][0]] if np.any(gaps) else zero_indices[-1]
            last_zero_start = zero_indices[np.where(gaps)[0][-1] + 1] if np.any(gaps) else zero_indices[0]
            # BUG FIX: clamp to 0 — a negative slice start wraps around on
            # arrays longer than 60 epochs and would silently drop the start
            # of the night instead of keeping it.
            start_keep_indices = max(0, int(first_zero_end - 30 * 60 / 30))
            end_keep_indices = int(last_zero_start + 30 * 60 / 30)
        else:
            # No Wake epochs at all: nothing to trim, keep every epoch
            # (previously this crashed with IndexError on the empty array).
            start_keep_indices, end_keep_indices = 0, n_epochs

        self.labels = mode_labels[start_keep_indices:end_keep_indices]
        self.label_length = len(self.labels)
        log_message(f"分段后且去掉伪标签后的标签长度：{self.label_length}")

        # Apply the same sample mask / epoching / trimming to every channel.
        for channel in self.channel_names:
            # raw_data[channel, :] produced shape (1, n_samples); take row 0.
            self.data[channel] = self.data[channel][0][valid_label_indices]
            reshaped_data = self.data[channel].reshape(n_epochs, samples_per_epoch)
            self.data[channel] = reshaped_data[start_keep_indices:end_keep_indices]
        self.data_length = len(self.data[self.channel_names[0]])
        log_message(f"分段后且去掉伪数据后的数据长度：{self.data_length}")

    # Extract the raw sample arrays for the channels of interest.
    def get_all_data(self):
        """Pull each configured channel's samples out of the loaded Raw object."""
        for channel in self.channel_names:
            # mne Raw indexing returns (data, times); data has shape (1, n).
            self.data[channel], _ = self.raw_data[channel, :]
        self.data_length = self.data[self.channel_names[0]].shape[1]
        log_message(f"原数据长度：{self.data_length}")

    # Expand hypnogram annotations into per-sample labels.
    def get_all_labels(self):
        """Build one integer stage label per raw sample from the annotations."""
        # Stages 3 and 4 are merged into a single deep-sleep class; anything
        # not listed (e.g. movement/unscored markers) maps to -1.
        stages_mapping = {
            'Sleep stage W': 0,
            'Sleep stage 1': 1,
            'Sleep stage 2': 2,
            'Sleep stage 3': 3,
            'Sleep stage 4': 3,
            'Sleep stage R': 4,
        }
        all_labels = []
        for duration, description in zip(self.raw_annotations.duration,
                                         self.raw_annotations.description):
            # Each annotation spans `duration` seconds -> duration * sfreq samples.
            label_numbers = int(self.sampling_frequency * duration)
            label_value = stages_mapping.get(description, -1)
            all_labels.extend([label_value] * label_numbers)

        # Annotations can run past the recording's end; clip to the data length.
        self.labels = all_labels[:self.data_length]
        self.label_length = len(self.labels)
        # BUG FIX: this log line previously reported self.data_length.
        log_message(f"原标签长度：{self.label_length}")

    def get_files_names(self):
        """Discover PSG recordings and pair each with its hypnogram and output path."""
        self.data_files_names = glob.glob(os.path.join(self.data_files_path, '*PSG.edf'))
        # Strip the trailing 9 characters of the basename to obtain the shared
        # subject/night prefix used to locate the matching hypnogram file.
        self.base_files_names = [os.path.basename(file_name)[:-9] for file_name in self.data_files_names]
        # NOTE(review): glob(...)[0] raises IndexError if any hypnogram is
        # missing — assumes every PSG file has exactly one annotation file.
        self.anno_files_names = [
            glob.glob(os.path.join(self.anno_files_path, f"{base_name}*-Hypnogram.edf"))[0]
            for base_name in self.base_files_names
        ]
        self.save_files_names = [
            os.path.join(self.save_files_path, f"{base_name}.h5")
            for base_name in self.base_files_names
        ]
        self.files_counts = len(self.data_files_names)

    def data_preprocessing(self):
        """Entry point: process every PSG/hypnogram pair and write one .h5 each."""
        self.get_files_names()
        for file in range(self.files_counts):
            self.raw_data = mne.io.read_raw_edf(self.data_files_names[file], preload=True)
            log_message(f"正在处理第{file + 1 }个文件: {self.data_files_names[file]}")
            self.sampling_frequency = self.raw_data.info["sfreq"]
            self.raw_annotations = mne.read_annotations(self.anno_files_names[file])
            self.data_durations = self.raw_data.times[-1]

            self.get_all_data()
            self.get_all_labels()

            self.processing_data_labels()
            self.save_files(file)
            log_message(f"\r\n")

