from __future__ import annotations

import os
import glob
import numpy as np
import pandas as pd
from scipy.io import loadmat

# Reuse the same feature engineering implemented in feature_extraction.py
# by embedding a minimal copy of EnhancedBearingFeatureAnalyzer core that
# focuses on signal->feature extraction, while adding domain-aware loaders
# similar to comprehensive_feature_extraction.py.

from feature_extraction import EnhancedBearingFeatureAnalyzer


class DomainFeatureExtractor(EnhancedBearingFeatureAnalyzer):
    """Extract features for Source and Target domains.

    - Source domain: walk through 源域数据集 (12kHz_DE_data, 48kHz_DE_data, 48kHz_Normal_data)
      and infer labels B/IR/OR/N from folder names like comprehensive_feature_extraction.py.
    - Target domain: read all *.mat in 目标域数据集, sampling_rate default 32000,
      keep domain='Target' and no label (or 'Unknown').

    The feature set remains exactly those computed by EnhancedBearingFeatureAnalyzer.
    """

    # Fallback shaft speed (RPM) used when a source .mat carries no RPM field
    # (presumably the CWRU 0-HP operating point — confirm against dataset docs).
    _DEFAULT_RPM = 1797

    def __init__(self, project_root: str, target_sr: int = 32000):
        """
        Args:
            project_root: top-level directory that contains 源域数据集 and 目标域数据集.
            target_sr: sampling rate (Hz) assumed for every target-domain file.
        """
        # data_path here should point to the top-level directory that contains 源域数据集 and 目标域数据集
        super().__init__(data_path=project_root)
        self.project_root = project_root
        self.source_root = os.path.join(project_root, '源域数据集')
        self.target_root = os.path.join(project_root, '目标域数据集')
        self.target_sr = target_sr

    # ---------- Utilities ----------
    def load_data(self):
        """Load both domains into self.source_data / self.target_data."""
        print("Loading data...")
        self.source_data = self._load_domain_data("源域数据集", is_source=True)
        self.target_data = self._load_domain_data("目标域数据集", is_source=False)
        print(f"Loaded: {len(self.source_data)} source files, {len(self.target_data)} target files")

    def _load_domain_data(self, domain_path, is_source=True):
        """Load every .mat under one domain directory into a keyed dict.

        Args:
            domain_path: directory name relative to self.data_path.
            is_source: True -> walk the CWRU-style fault/normal layout;
                False -> flat directory of unlabeled target .mat files.

        Returns:
            dict mapping a unique key to a record with 'data',
            'sampling_rate', 'location' (+ 'fault_type' for source records).
        """
        data = {}
        full_path = os.path.join(self.data_path, domain_path)

        if is_source:
            # Fault recordings live under per-sampling-rate subdirectories.
            for sr_dir in ["12kHz_DE_data", "48kHz_DE_data"]:
                sr_path = os.path.join(full_path, sr_dir)
                if os.path.exists(sr_path):
                    sampling_rate = 12000 if "12kHz" in sr_dir else 48000
                    data.update(self._load_fault_data(sr_path, sampling_rate))

            # Healthy-bearing recordings.
            normal_path = os.path.join(full_path, "48kHz_Normal_data")
            if os.path.exists(normal_path):
                data.update(self._load_normal_data(normal_path, 48000))
        else:
            # Target domain: flat directory, fixed sampling rate, no labels.
            for mat_file in glob.glob(os.path.join(full_path, "*.mat")):
                try:
                    signal_data = self._extract_signal_data(loadmat(mat_file))
                    if signal_data is not None:
                        filename = os.path.basename(mat_file)
                        data[filename] = {
                            'data': signal_data,
                            'sampling_rate': self.target_sr,
                            'location': 'DE'
                        }
                except Exception as e:
                    print(f"Failed to load: {mat_file}, Error: {e}")

        return data

    def _load_fault_data(self, base_path, sampling_rate):
        """Load B/IR/OR fault recordings below base_path.

        BUGFIX: the dict key now includes the filename. The previous constant
        suffix made every file within one load directory overwrite the last,
        silently keeping a single recording per (rate, fault, load) triple.
        """
        data = {}
        for fault_type in ['B', 'IR', 'OR']:
            fault_path = os.path.join(base_path, fault_type)
            if not os.path.exists(fault_path):
                continue
            for load_dir in os.listdir(fault_path):
                load_path = os.path.join(fault_path, load_dir)
                if not os.path.isdir(load_path):
                    continue
                for mat_file in glob.glob(os.path.join(load_path, "*.mat")):
                    try:
                        signal_data = self._extract_signal_data(loadmat(mat_file))
                        if signal_data is not None:
                            filename = os.path.basename(mat_file)
                            key = f"{sampling_rate}Hz_{fault_type}_{load_dir}_{filename}"
                            data[key] = {
                                'data': signal_data,
                                'fault_type': self.fault_types[fault_type],
                                'sampling_rate': sampling_rate,
                                'location': 'DE'
                            }
                    except Exception as e:
                        print(f"Failed to load: {mat_file}, Error: {e}")
        return data

    def _load_normal_data(self, normal_path, sampling_rate):
        """Load healthy recordings.

        BUGFIX: the key now includes the filename; previously every normal
        file mapped to the same constant key, so only the last one survived.
        """
        data = {}
        for mat_file in glob.glob(os.path.join(normal_path, "*.mat")):
            try:
                signal_data = self._extract_signal_data(loadmat(mat_file))
                if signal_data is not None:
                    filename = os.path.basename(mat_file)
                    data[f"{sampling_rate}Hz_Normal_{filename}"] = {
                        'data': signal_data,
                        'fault_type': 'Normal',
                        'sampling_rate': sampling_rate,
                        'location': 'DE'
                    }
            except Exception as e:
                print(f"Failed to load: {mat_file}, Error: {e}")
        return data

    def _extract_signal_data(self, mat_data):
        """Pick the most plausible vibration array out of a loaded .mat dict.

        Tries a list of conventional variable names first, then falls back to
        any non-metadata array with more than 1000 samples. Returns a 1-D
        numpy array, or None when nothing usable is found.
        """
        possible_keys = ['X', 'x', 'data', 'signal', 'vibration', 'DE', 'FE', 'BA']

        for key in possible_keys:
            if key in mat_data:
                data = mat_data[key]
                if isinstance(data, np.ndarray):
                    return data.flatten() if data.ndim > 1 else data

        # Fallback: any other field that looks like a long signal.
        for key in mat_data.keys():
            if not key.startswith('__'):
                data = mat_data[key]
                if isinstance(data, np.ndarray) and data.size > 1000:
                    return data.flatten() if data.ndim > 1 else data
        return None

    @staticmethod
    def _extract_signal_from_mat(mat: dict) -> tuple[np.ndarray | None, float | None]:
        """Return (signal, rpm) from a CWRU-style .mat dict.

        BUGFIX: the drive-end (DE) channel is now always preferred over the
        fan-end (FE) channel; the old single pass picked whichever matching
        key happened to come first in dict iteration order.
        """
        signal_data = None
        for suffix in ('DE_time', 'FE_time'):
            for key in mat.keys():
                if suffix in key:
                    signal_data = mat[key].flatten()
                    break
            if signal_data is not None:
                break
        rpm = None
        for key in mat.keys():
            if 'RPM' in key:
                try:
                    rpm = float(np.ravel(mat[key])[0])
                except Exception:
                    rpm = None
                break
        return signal_data, rpm

    # ---------- Per-file helpers (shared by the loaders) ----------
    def _extract_source_features(self, mat_file: str, fs: int, bearing_type: str) -> dict | None:
        """Load one source .mat and compute its feature dict (None if unusable)."""
        mat = loadmat(mat_file)
        sig, rpm = self._extract_signal_from_mat(mat)
        if sig is None or len(sig) < 1000:
            return None
        # Clip to ~10 s like the original pipeline.
        sig = sig[:min(len(sig), int(10 * fs))]
        feats = self.extract_comprehensive_features(sig, fs, rpm or self._DEFAULT_RPM, bearing_type)
        feats['filename'] = os.path.basename(mat_file)
        feats['sampling_rate'] = fs
        feats['signal_length'] = len(sig)
        return feats

    @staticmethod
    def _maybe_subsample(mats: list[str], limit: int | None) -> list[str]:
        """Randomly keep at most `limit` paths (all of them when limit is falsy)."""
        if limit and len(mats) > limit:
            return list(np.random.choice(mats, limit, replace=False))
        return mats

    # ---------- Loaders ----------
    def load_source_domain(self, max_files_per_category: int | None = None) -> pd.DataFrame:
        """Extract features for every labeled source recording.

        Args:
            max_files_per_category: optional cap; when a directory holds more
                files, a random subset of this size is used.

        Returns:
            DataFrame with one row per file, label in {B, IR, OR, N} and
            domain='Source'; empty DataFrame when the source root is missing
            or yields no usable files.
        """
        features: list[dict] = []
        labels: list[str] = []

        if not os.path.isdir(self.source_root):
            print(f"源域路径不存在: {self.source_root}")
            return pd.DataFrame()

        # Scan 12kHz_DE_data, 48kHz_DE_data
        for sr_dir in ['12kHz_DE_data', '48kHz_DE_data']:
            base = os.path.join(self.source_root, sr_dir)
            if not os.path.isdir(base):
                continue
            fs = 12000 if '12kHz' in sr_dir else 48000
            bearing = 'SKF6205' if 'DE' in sr_dir else 'SKF6203'

            # B / IR layout: <fault>/<size>/*.mat
            for fault_type in ['B', 'IR']:
                fpath = os.path.join(base, fault_type)
                if not os.path.isdir(fpath):
                    continue
                for size_dir in os.listdir(fpath):
                    size_path = os.path.join(fpath, size_dir)
                    if not os.path.isdir(size_path):
                        continue
                    mats = self._maybe_subsample(
                        glob.glob(os.path.join(size_path, '*.mat')),
                        max_files_per_category)
                    for mat_file in mats:
                        try:
                            print(f'正在处理{mat_file}')
                            feats = self._extract_source_features(mat_file, fs, bearing)
                            if feats is None:
                                continue
                            features.append(feats)
                            labels.append(fault_type)
                        except Exception as e:
                            print(f"源域读取失败: {mat_file}, {e}")

            # OR layout adds a position level: OR/<position>/<size>/*.mat
            or_base = os.path.join(base, 'OR')
            if os.path.isdir(or_base):
                for pos in os.listdir(or_base):
                    pos_path = os.path.join(or_base, pos)
                    if not os.path.isdir(pos_path):
                        continue
                    for size_dir in os.listdir(pos_path):
                        size_path = os.path.join(pos_path, size_dir)
                        if not os.path.isdir(size_path):
                            continue
                        mats = self._maybe_subsample(
                            glob.glob(os.path.join(size_path, '*.mat')),
                            max_files_per_category)
                        for mat_file in mats:
                            try:
                                feats = self._extract_source_features(mat_file, fs, bearing)
                                if feats is None:
                                    continue
                                features.append(feats)
                                labels.append('OR')
                            except Exception as e:
                                print(f"源域读取失败: {mat_file}, {e}")

        # Healthy recordings (48kHz_Normal_data); drive-end bearing, 48 kHz.
        normal_dir = os.path.join(self.source_root, '48kHz_Normal_data')
        if os.path.isdir(normal_dir):
            fs = 48000
            mats = self._maybe_subsample(
                glob.glob(os.path.join(normal_dir, '*.mat')),
                max_files_per_category)
            for mat_file in mats:
                try:
                    feats = self._extract_source_features(mat_file, fs, 'SKF6205')
                    if feats is None:
                        continue
                    features.append(feats)
                    labels.append('N')
                except Exception as e:
                    print(f"源域读取失败: {mat_file}, {e}")

        if not features:
            return pd.DataFrame()

        df = pd.DataFrame(features)
        df['domain'] = 'Source'
        df['label'] = labels
        return df

    def load_target_domain(self, max_files: int | None = None) -> pd.DataFrame:
        """Extract features for every (unlabeled) target recording.

        Args:
            max_files: optional cap; a random subset is taken when exceeded.

        Returns:
            DataFrame with domain='Target'; empty when the target root is
            missing or contains no usable files.
        """
        features: list[dict] = []
        if not os.path.isdir(self.target_root):
            print(f"目标域路径不存在: {self.target_root}")
            return pd.DataFrame()

        mats = self._maybe_subsample(
            glob.glob(os.path.join(self.target_root, '*.mat')), max_files)

        fs = self.target_sr
        for mat_file in mats:
            try:
                mat = loadmat(mat_file)
                sig = self._extract_signal_data(mat)
                if sig is None or len(sig) < 1000:
                    continue
                # Resampling not applied here; features are computed at the
                # declared fs, same approach as the base analyzer.
                sig = sig[:min(len(sig), int(10 * fs))]
                # 600 RPM: assumed nominal target shaft speed — TODO confirm.
                feats = self.extract_comprehensive_features(sig, fs, 600, 'SKF6205')
                feats['filename'] = os.path.basename(mat_file)
                feats['sampling_rate'] = fs
                feats['signal_length'] = len(sig)
                features.append(feats)
            except Exception as e:
                print(f"目标域读取失败: {mat_file}, {e}")

        if not features:
            return pd.DataFrame()

        df = pd.DataFrame(features)
        df['domain'] = 'Target'
        return df


def build_and_save_domain_dataset(project_root: str,
                                  out_csv: str = os.path.join('results', 'domain_features.csv'),
                                  max_files_per_category: int | None = None,
                                  max_target_files: int | None = None) -> pd.DataFrame:
    """Extract both domains, concatenate, and persist to CSV.

    Args:
        project_root: directory containing 源域数据集/ and 目标域数据集/.
        out_csv: output CSV path; parent directories are created as needed.
        max_files_per_category: optional per-category cap for the source domain.
        max_target_files: optional cap for the target domain.

    Returns:
        The combined DataFrame (source rows first, then target rows).
    """
    # BUGFIX: os.path.dirname() is '' for a bare filename, and makedirs('')
    # raises FileNotFoundError — only create directories when one is present.
    out_dir = os.path.dirname(out_csv)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    extractor = DomainFeatureExtractor(project_root)

    target_df = extractor.load_target_domain(max_files=max_target_files)
    source_df = extractor.load_source_domain(max_files_per_category=max_files_per_category)
    combined = pd.concat([source_df, target_df], ignore_index=True)
    # utf-8-sig BOM so Excel opens the Chinese filenames correctly.
    combined.to_csv(out_csv, index=False, encoding='utf-8-sig')
    print(f"Saved combined domain dataset: {combined.shape} -> {out_csv}")
    return combined


def main():
    """CLI entry point: parse arguments and build the combined dataset."""
    import argparse
    parser = argparse.ArgumentParser(description='Extract Source/Target domain features using feature_extraction logic')
    # BUGFIX: default was the Windows-only literal '..\\..'; os.path.join
    # produces the same path on Windows and a valid one on POSIX.
    parser.add_argument('--root', default=os.path.join('..', '..'),
                        help='Project root containing 源域数据集/ 目标域数据集/')
    parser.add_argument('--out', default=os.path.join('results', 'domain_features.csv'))
    parser.add_argument('--max-per-category', type=int, default=None)
    parser.add_argument('--max-target', type=int, default=None)
    args = parser.parse_args()

    build_and_save_domain_dataset(args.root, args.out, args.max_per_category, args.max_target)


if __name__ == '__main__':
    main()


