import os
import dvc.api
import pandas as pd
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
from imblearn.over_sampling import SMOTE
from torch.utils.data import TensorDataset, DistributedSampler
from typing import Tuple, Dict, Any, Optional, List

from src.config_loader import ConfigLoader
from src.data_connector import DataConnector


class DataProcessor:
    """Data processing component: loading, preprocessing and augmentation.

    Responsibilities:
      * load raw data from the configured source or a DVC-tracked version,
      * split / scale / balance the data,
      * build PyTorch datasets (optionally with a distributed sampler),
      * compute class weights and monitor feature drift.
    """

    def __init__(self, config: "ConfigLoader", logger):
        """Store dependencies, build scaler/connector and create artifact dirs.

        Args:
            config: project configuration accessor exposing ``get(key, default)``.
            logger: logger-like object exposing ``info``/``warning``/``error``.
        """
        self.config = config
        self.logger = logger
        self.scaler = StandardScaler()
        self.random_seed = config.get('system.random_seed', 42)
        self.connector = DataConnector(config, logger)  # multi-source data connector
        self._init_paths()

        # Directory used by DVC-based data version control.
        self.data_version_dir = config.get('system.artifacts.data_version_dir')
        os.makedirs(self.data_version_dir, exist_ok=True)

    def _init_paths(self) -> None:
        """Create the model/metric/analysis artifact directories from config."""
        self.artifact_dirs = {
            'model': self.config.get('system.artifacts.model_dir'),
            'metric': self.config.get('system.artifacts.metric_dir'),
            'analysis': self.config.get('system.artifacts.analysis_dir')
        }
        for dir_path in self.artifact_dirs.values():
            os.makedirs(dir_path, exist_ok=True)

    def load_data(self, version: Optional[str] = None) -> Tuple[pd.DataFrame, np.ndarray, np.ndarray]:
        """Load raw data, optionally from a specific DVC revision.

        Args:
            version: DVC revision (tag/commit) to read ``data.source`` from;
                ``None`` loads live data through the connector instead.

        Returns:
            ``(df, x, y)``: the full DataFrame, the feature matrix and the
            target vector.

        Raises:
            ValueError: if a configured feature or the target column is
                missing from the loaded data.
        """
        try:
            if version:
                # Versioned load through DVC.
                self.logger.info(f"加载数据版本: {version}")
                with dvc.api.open(
                        self.config.get('data.source'),
                        repo='.',
                        rev=version
                ) as f:
                    df = pd.read_csv(f)
            else:
                # Live load through the multi-source connector.
                connection = self.connector.connect()
                df = self.connector.load_data(connection)

            self.logger.info(f"数据加载成功，形状: {df.shape}")

            features = self.config.get('data.features')
            target = self.config.get('data.target')

            # Validate all configured columns before slicing so the caller
            # gets a clear error rather than a raw pandas KeyError.
            missing_features = [f for f in features if f not in df.columns]
            if missing_features:
                raise ValueError(f"数据中缺少特征: {missing_features}")
            if target not in df.columns:
                raise ValueError(f"数据中缺少目标列: {target}")

            x = df[features].values
            y = df[target].values

            return df, x, y
        except Exception as e:
            self.logger.error(f"数据加载失败: {str(e)}")
            raise

    def split_data(self, x: np.ndarray, y: np.ndarray,
                   distributed: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray,
                                                       np.ndarray, np.ndarray, np.ndarray]:
        """Split data into train/validation/test sets.

        Args:
            x: feature matrix.
            y: target vector.
            distributed: reserved for API compatibility — distributed
                sampling is actually handled in :meth:`create_datasets`.

        Returns:
            ``(x_train, x_val, x_test, y_train, y_val, y_test)``.
        """
        test_ratio = self.config.get('data.split.test_ratio', 0.2)
        val_ratio = self.config.get('data.split.val_ratio', 0.25)
        shuffle = self.config.get('data.split.shuffle', True)
        stratify = y if self.config.get('data.split.stratify', True) else None

        # First carve off the test set from the full data.
        x_train_val, x_test, y_train_val, y_test = train_test_split(
            x, y,
            test_size=test_ratio,
            random_state=self.random_seed,
            shuffle=shuffle,
            stratify=stratify
        )

        # Then split the remainder into train and validation sets,
        # re-stratifying on the remaining labels.
        x_train, x_val, y_train, y_val = train_test_split(
            x_train_val, y_train_val,
            test_size=val_ratio,
            random_state=self.random_seed,
            shuffle=shuffle,
            stratify=y_train_val if stratify is not None else None
        )

        self.logger.info(f"数据划分完成 - 训练集: {x_train.shape}, 验证集: {x_val.shape}, 测试集: {x_test.shape}")
        return x_train, x_val, x_test, y_train, y_val, y_test

    def balance_data(self, x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Rebalance classes (currently SMOTE only); passthrough when disabled.

        Returns:
            The resampled ``(x, y)`` pair, or the inputs unchanged when
            balancing is disabled or the configured method is unsupported.
        """
        if not self.config.get('data.balance.enable', False):
            return x, y

        method = self.config.get('data.balance.method', 'smote')
        if method == 'smote':
            params = self.config.get('data.balance.params', {})
            smote = SMOTE(random_state=self.random_seed, **params)
            x_resampled, y_resampled = smote.fit_resample(x, y)
            self.logger.info(f"SMOTE过采样完成 - 原始: {x.shape}, 采样后: {x_resampled.shape}")
            return x_resampled, y_resampled
        else:
            # Unknown method: warn and fall back to the original data.
            self.logger.warning(f"不支持的数据平衡方法: {method}，返回原始数据")
            return x, y

    def preprocess(self, x_train: np.ndarray, x_val: np.ndarray, x_test: np.ndarray) -> Tuple[
            np.ndarray, np.ndarray, np.ndarray]:
        """Standardize features; the scaler is fit on the train split only
        to avoid leaking validation/test statistics."""
        if self.config.get('data.preprocess.scaling', True):
            x_train = self.scaler.fit_transform(x_train)
            x_val = self.scaler.transform(x_val)
            x_test = self.scaler.transform(x_test)
            self.logger.info("特征标准化完成")
        return x_train, x_val, x_test

    def create_datasets(self, x_train: np.ndarray, x_val: np.ndarray, x_test: np.ndarray,
                        y_train: np.ndarray, y_val: np.ndarray, y_test: np.ndarray,
                        distributed: bool = False) -> Tuple[
            TensorDataset, TensorDataset, TensorDataset, Optional[DistributedSampler]]:
        """Wrap numpy splits into PyTorch ``TensorDataset`` objects.

        Args:
            distributed: when ``True``, also build a ``DistributedSampler``
                for the training set (requires an initialized process group).

        Returns:
            ``(train_set, val_set, test_set, train_sampler)``; the sampler is
            ``None`` unless ``distributed`` is requested.
        """
        train_set = TensorDataset(
            torch.tensor(x_train, dtype=torch.float32),
            torch.tensor(y_train, dtype=torch.int64)
        )
        val_set = TensorDataset(
            torch.tensor(x_val, dtype=torch.float32),
            torch.tensor(y_val, dtype=torch.int64)
        )
        test_set = TensorDataset(
            torch.tensor(x_test, dtype=torch.float32),
            torch.tensor(y_test, dtype=torch.int64)
        )

        # Distributed sampler for multi-process training.
        train_sampler = DistributedSampler(train_set) if distributed else None

        return train_set, val_set, test_set, train_sampler

    def data_augmentation(self, x: torch.Tensor) -> torch.Tensor:
        """Augment a feature batch: noise, scaled perturbation, feature crosses.

        NOTE: the domain-specific crosses append extra columns, so the output
        may be wider than the input when augmentation is enabled.

        Args:
            x: 2-D feature tensor of shape ``(batch, n_features)``.

        Returns:
            The augmented tensor, or ``x`` unchanged when augmentation is
            disabled.
        """
        if not self.config.get('data.preprocess.augmentation.enable', False):
            return x

        # Additive Gaussian noise.
        noise_factor = self.config.get('data.preprocess.augmentation.noise_factor', 0.01)
        noise = torch.randn_like(x) * noise_factor
        x = x + noise

        # Random per-element scaling perturbation.
        perturb_prob = self.config.get('data.preprocess.augmentation.perturb_prob', 0.3)
        scale_factors = self.config.get('data.preprocess.augmentation.scale_factors', [1.0] * x.shape[1])
        scale_tensor = torch.FloatTensor(scale_factors).to(x.device)

        # BUGFIX: build the mask on the same device as ``x`` — the original
        # torch.rand(x.shape) produced a CPU mask, which makes torch.where
        # fail for CUDA inputs.
        mask = torch.rand(x.shape, device=x.device) < perturb_prob
        x = torch.where(mask, x * scale_tensor, x)

        # Domain-specific crosses for the phone-price task (assumes column
        # order battery_power, px_height, px_width, ram — TODO confirm).
        if x.shape[1] >= 4:  # need at least 4 features to cross
            # battery capacity / RAM (endurance-performance ratio)
            battery_power = x[:, 0]
            ram = x[:, 3]
            performance_ratio = battery_power / (ram + 1e-8)  # avoid div-by-zero
            x = torch.cat([x, performance_ratio.unsqueeze(1)], dim=1)

            # screen resolution (px_height * px_width)
            screen_resolution = x[:, 1] * x[:, 2]
            x = torch.cat([x, screen_resolution.unsqueeze(1)], dim=1)

        return x

    def compute_class_weights(self, y: np.ndarray) -> np.ndarray:
        """Compute per-class weights ordered like ``np.unique(y)``.

        Uses sklearn's 'balanced' scheme, optionally boosts configured
        classes, then normalizes to sum to 1. Returns uniform ones when
        weighting is disabled.
        """
        classes = np.unique(y)
        if not self.config.get('model.weights.enable', False):
            return np.ones(len(classes))

        # Balanced base weights, ordered by ``classes``.
        class_weights = compute_class_weight('balanced', classes=classes, y=y)

        # Boost the weight of specific classes.
        boost_classes = self.config.get('model.weights.boost_classes', [])
        boost_factor = self.config.get('model.weights.boost_factor', 1.3)

        # BUGFIX: index by the class label's position in ``classes``, not by
        # the raw label value — labels are not guaranteed to be 0..k-1.
        for cls in boost_classes:
            pos = np.flatnonzero(classes == cls)
            if pos.size:
                class_weights[pos[0]] *= boost_factor

        # Normalize so the weights sum to 1.
        class_weights = class_weights / class_weights.sum()
        self.logger.info(f"计算类别权重: {class_weights}")
        return class_weights

    def check_drift(self, reference_df: pd.DataFrame, current_df: pd.DataFrame) -> Dict[str, float]:
        """Check feature drift between a reference and the current data.

        Returns:
            Mapping of feature name -> drift score, restricted to features
            whose score exceeds ``data.monitor.drift_threshold``.
        """
        features = self.config.get('data.features')
        drift_scores = self.connector.check_data_drift(reference_df, current_df, features)

        threshold = self.config.get('data.monitor.drift_threshold', 0.2)
        significant_drift = {f: s for f, s in drift_scores.items() if s > threshold}

        if significant_drift:
            self.logger.warning(f"检测到显著数据漂移: {significant_drift}")
        else:
            self.logger.info("未检测到显著数据漂移")

        return significant_drift