import os
import pandas as pd
import numpy as np
import requests
import boto3
from sqlalchemy import create_engine
from typing import Tuple, Dict, Any, Optional


class DataConnector:
    """Multi-source data connector supporting CSV files, MySQL, HTTP APIs and S3.

    The active source is chosen by configuration:
      - ``data.source_type``: one of ``'csv'`` (default), ``'mysql'``, ``'s3'``, ``'api'``
      - ``data.source``: location string — a file path (csv), ``'<dsn>?table=<name>'``
        (mysql), ``'bucket/key'`` or ``'s3://bucket/key'`` (s3), or a URL (api)
      - ``data.source_params``: dict of extra parameters; string values of the exact
        form ``${ENV_VAR}`` are resolved from the environment at construction time.

    Also provides PSI-based data-drift detection between two DataFrames.
    """

    def __init__(self, config, logger):
        """
        Args:
            config: object exposing ``get(key, default)`` (project-declared type).
            logger: ``logging.Logger``-like object used for status reporting.
        """
        self.config = config
        self.logger = logger
        self.source_type = config.get('data.source_type', 'csv')
        self.source = config.get('data.source')
        self.source_params = config.get('data.source_params', {})

        # Substitute ${ENV_VAR} placeholders before any connection is attempted.
        self._resolve_env_vars()

    def _resolve_env_vars(self) -> None:
        """Replace ``${VAR}``-style string values in source_params with environment values.

        A value is only substituted when the whole string is a single placeholder.
        If the environment variable is unset, the raw placeholder is kept so that a
        later connection failure surfaces the unresolved name instead of hiding it.
        """
        for key, value in self.source_params.items():
            if isinstance(value, str) and value.startswith("${") and value.endswith("}"):
                env_var = value[2:-1]
                # os.getenv default keeps the placeholder when the variable is missing.
                self.source_params[key] = os.getenv(env_var, value)

    def connect(self) -> Any:
        """Create a connection object appropriate for the configured source type.

        Returns:
            SQLAlchemy engine (mysql), boto3 S3 client (s3), ``True`` (api, after a
            successful HEAD probe), or ``None`` (csv needs no persistent connection).
        """
        if self.source_type == 'mysql':
            return self._connect_mysql()
        if self.source_type == 's3':
            return self._connect_s3()
        if self.source_type == 'api':
            return self._connect_api()
        # CSV is read straight from disk; no persistent connection required.
        return None

    def _connect_mysql(self) -> Any:
        """Create a SQLAlchemy engine from ``source`` (``'<dsn>?table=<name>'``).

        Credentials are URL-encoded so characters such as '@', ':' or '&' in the
        password cannot corrupt the connection URL.

        Raises:
            Exception: re-raised after logging when engine creation fails.
        """
        try:
            # Stdlib; imported locally to keep module-level dependencies unchanged.
            from urllib.parse import quote_plus

            user = self.source_params.get('user')
            password = self.source_params.get('password')
            # Only the DSN part is needed to connect; the table (after '?table=')
            # is consumed by _load_mysql.
            host = self.source.split('?table=')[0]
            engine = create_engine(
                f"{host}?user={quote_plus(user or '')}&password={quote_plus(password or '')}"
            )
            self.logger.info(f"成功连接到MySQL数据库: {host}")
            return engine
        except Exception as e:
            self.logger.error(f"MySQL连接失败: {str(e)}")
            raise

    def _connect_s3(self) -> Any:
        """Create a boto3 S3 client using ``access_key``/``secret_key`` from source_params.

        Raises:
            Exception: re-raised after logging when client creation fails.
        """
        try:
            s3 = boto3.client(
                's3',
                aws_access_key_id=self.source_params.get('access_key'),
                aws_secret_access_key=self.source_params.get('secret_key')
            )
            self.logger.info("成功连接到S3存储")
            return s3
        except Exception as e:
            self.logger.error(f"S3连接失败: {str(e)}")
            raise

    def _connect_api(self) -> Any:
        """Validate API reachability with a HEAD request to ``source``.

        Returns:
            True when the endpoint answers with a status code below 400.

        Raises:
            Exception: on network failure or a >= 400 status code, after logging.
        """
        try:
            response = requests.head(self.source, timeout=self.source_params.get('timeout', 30))
            if response.status_code < 400:
                self.logger.info(f"API连接验证成功: {self.source}")
                return True
            raise Exception(f"API验证失败，状态码: {response.status_code}")
        except Exception as e:
            self.logger.error(f"API连接失败: {str(e)}")
            raise

    def load_data(self, connection: Any = None) -> pd.DataFrame:
        """Load the configured source into a DataFrame.

        Args:
            connection: object returned by :meth:`connect`; required for the
                'mysql' and 's3' source types, ignored otherwise.

        Raises:
            ValueError: when ``source_type`` is not one of the supported kinds.
        """
        if self.source_type == 'csv':
            return self._load_csv()
        elif self.source_type == 'mysql':
            return self._load_mysql(connection)
        elif self.source_type == 's3':
            return self._load_s3(connection)
        elif self.source_type == 'api':
            return self._load_api()
        else:
            raise ValueError(f"不支持的数据源类型: {self.source_type}")

    def _load_csv(self) -> pd.DataFrame:
        """Read the CSV file at ``source`` into a DataFrame."""
        try:
            return pd.read_csv(self.source)
        except Exception as e:
            self.logger.error(f"CSV文件加载失败: {str(e)}")
            raise

    def _load_mysql(self, engine) -> pd.DataFrame:
        """Read the full table named in ``source`` (after ``'?table='``) via *engine*.

        Raises:
            ValueError: when no table name is present or it is not a plain
                (optionally schema-qualified) identifier.
        """
        try:
            _, _, table = self.source.partition('?table=')
            # Table names cannot be bound as SQL parameters, so the identifier is
            # whitelist-validated before interpolation to block SQL injection via
            # a malicious configuration value.
            if not table or not all(c.isalnum() or c in '_.' for c in table):
                raise ValueError(f"非法的表名: {table}")
            return pd.read_sql(f"SELECT * FROM {table}", engine)
        except Exception as e:
            self.logger.error(f"MySQL数据加载失败: {str(e)}")
            raise

    def _load_s3(self, s3) -> pd.DataFrame:
        """Download ``source`` ('bucket/key', optionally 's3://'-prefixed) and parse as CSV."""
        try:
            path = self.source
            # Accept both 'bucket/key' and the common 's3://bucket/key' form.
            if path.startswith('s3://'):
                path = path[len('s3://'):]
            bucket, key = path.split('/', 1)
            obj = s3.get_object(Bucket=bucket, Key=key)
            return pd.read_csv(obj['Body'])
        except Exception as e:
            self.logger.error(f"S3数据加载失败: {str(e)}")
            raise

    def _load_api(self) -> pd.DataFrame:
        """GET ``source`` with optional ``params``/``timeout`` and build a DataFrame from its JSON."""
        try:
            response = requests.get(
                self.source,
                params=self.source_params.get('params', {}),
                timeout=self.source_params.get('timeout', 30)
            )
            response.raise_for_status()
            return pd.DataFrame(response.json())
        except Exception as e:
            self.logger.error(f"API数据加载失败: {str(e)}")
            raise

    def check_data_drift(self, reference_data: pd.DataFrame, current_data: pd.DataFrame,
                         features: list) -> Dict[str, float]:
        """Compute a PSI drift score per feature between two DataFrames.

        Features absent from either frame are silently skipped, so the result
        only contains keys present in both inputs.

        Returns:
            Mapping of feature name to its PSI value (0.0 means no drift).
        """
        drift_scores = {}
        for feature in features:
            if feature not in reference_data.columns or feature not in current_data.columns:
                continue

            # Population Stability Index over a shared binning of both samples.
            psi = self._calculate_psi(
                reference_data[feature].values,
                current_data[feature].values
            )
            drift_scores[feature] = psi

        return drift_scores

    def _calculate_psi(self, expected: np.ndarray, actual: np.ndarray, bins: int = 10) -> float:
        """Compute the Population Stability Index between two 1-D samples.

        Both samples are histogrammed over shared equal-width bins spanning their
        combined range. Empty bins are floored at 1e-4 to keep the log defined.

        Returns:
            The PSI as a plain float; 0.0 when the computation fails (e.g. empty
            input or a constant feature, where the bin edges degenerate) — the
            failure is logged as a warning rather than raised.
        """
        try:
            # Shared binning over the union of both samples' ranges.
            min_val = min(expected.min(), actual.min())
            max_val = max(expected.max(), actual.max())
            bin_edges = np.linspace(min_val, max_val, bins + 1)

            # Per-bin counts for each sample.
            expected_counts, _ = np.histogram(expected, bins=bin_edges)
            actual_counts, _ = np.histogram(actual, bins=bin_edges)

            # Convert counts to proportions.
            expected_dist = expected_counts / len(expected)
            actual_dist = actual_counts / len(actual)

            # Floor empty bins so log() and the ratio stay finite.
            expected_dist = np.where(expected_dist == 0, 0.0001, expected_dist)
            actual_dist = np.where(actual_dist == 0, 0.0001, actual_dist)

            # PSI = sum over bins of (actual - expected) * ln(actual / expected).
            psi_values = (actual_dist - expected_dist) * np.log(actual_dist / expected_dist)
            # Cast to a builtin float to honor the annotated return type.
            return float(np.sum(psi_values))
        except Exception as e:
            self.logger.warning(f"PSI计算失败: {str(e)}")
            return 0.0