import re
from typing import Dict, List, Literal, Optional, Tuple

import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sqlalchemy import create_engine


class DataProcessor:
    """End-to-end preprocessing pipeline for scraped house-listing CSVs.

    Pipeline order: load -> clean -> integrate -> reduce -> transform ->
    save to MySQL. Intermediate results are kept on the instance
    (``self.dfs``, ``self.merged_df``, ``self.processed_df``).
    """

    def __init__(self, db_config: Dict, file_paths: List[str]):
        """
        Args:
            db_config: MySQL connection settings; expected keys are
                'host', 'port', 'database', 'username', 'password'.
            file_paths: CSV files to load, one DataFrame each.
        """
        self.db_config = db_config
        self.file_paths = file_paths
        self.dfs: List[pd.DataFrame] = []
        self.merged_df: Optional[pd.DataFrame] = None
        self.processed_df: Optional[pd.DataFrame] = None
        self.reduction_params: Dict = {}
        # Per-column {original category -> integer index}, kept so one-hot
        # encoded columns can be traced back to the raw categories.
        self.label_mappings: Dict[str, Dict] = {}

    def load_data(self, encoding: str = 'utf-8-sig') -> None:
        """Load every CSV in ``self.file_paths`` into ``self.dfs``.

        Uses 'utf-8-sig' by default so a UTF-8 BOM is stripped; on failure
        prints a hint about alternative encodings and re-raises.
        """
        try:
            for path in self.file_paths:
                df = pd.read_csv(path, encoding=encoding)
                self.dfs.append(df)
                print(f"读取文件 {path}，形状: {df.shape}")
        except Exception as e:
            print(f"加载数据失败: {e}")
            print("提示：若仍报错，可尝试 encoding='utf-8' 或 'gb2312'")
            raise

    def clean_data(self, fill_strategy: str = 'mean') -> None:
        """Clean each loaded DataFrame: drop rows missing key columns,
        fill remaining missing values, then strip IQR outliers.

        Args:
            fill_strategy: 'mean', 'median' or 'drop' (see _handle_missing).
        """
        cleaned_dfs = []
        for i, df in enumerate(self.dfs):
            original_shape = df.shape
            # Rows lacking any of these key attributes are unusable.
            key_cols = ['building_structure', 'decoration', 'property_right']
            present = [c for c in key_cols if c in df.columns]
            if present:
                df = df.dropna(subset=present)
            df = self._handle_missing(df, fill_strategy)
            df, _ = self._handle_outliers(df)
            cleaned_dfs.append(df)
            print(f"数据集{i + 1}清洗后形状: {df.shape} (原始: {original_shape})")
        self.dfs = cleaned_dfs

    def integrate_data(self, key_col: str = 'area', cross_cols: Optional[List[str]] = None) -> None:
        """Merge all loaded DataFrames on ``key_col`` (outer join) and
        optionally add difference/ratio cross features.

        Overlapping non-key columns from later files get a '_dup_<n>'
        suffix from the merge and are then dropped, keeping the first
        occurrence of each duplicated column.

        Raises:
            ValueError: when no data has been loaded yet.
        """
        if not self.dfs:
            raise ValueError("无数据可集成，请先加载数据")
        self.merged_df = self.dfs[0]
        for df in self.dfs[1:]:
            self.merged_df = self.merged_df.merge(
                df, on=key_col, how='outer',
                suffixes=('', f'_dup_{len(self.merged_df.columns)}')
            )
            # BUGFIX: the old filter matched '<key_col>_dup_*', which can
            # never occur because the merge key itself is never suffixed,
            # so duplicate columns were never removed. Drop every column
            # carrying the dup suffix instead.
            dup_cols = [col for col in self.merged_df.columns if '_dup_' in col]
            self.merged_df = self.merged_df.drop(columns=dup_cols)
        # Cross features from the first two requested columns, if present.
        if cross_cols and len(cross_cols) >= 2:
            col1, col2 = cross_cols[:2]
            if col1 in self.merged_df.columns and col2 in self.merged_df.columns:
                self.merged_df[f'{col1}_与_{col2}_差值'] = self.merged_df[col1] - self.merged_df[col2]
                # Map 0 -> 0.001 in the divisor to avoid division by zero.
                self.merged_df[f'{col1}_与_{col2}_比值'] = self.merged_df[col1] / self.merged_df[col2].replace(0, 0.001)
        print(f"数据集成后形状: {self.merged_df.shape}")

    def reduce_data(self,
                    irrelevant_cols: Optional[List[str]] = None,
                    core_business_cols: Optional[List[str]] = None,
                    reduction_type: Literal['non_param', 'param_linear', 'param_loglinear'] = 'non_param',
                    sample_frac: float = 0.1,
                    group_col: str = 'area') -> None:
        """Dimensionality and row-count reduction.

        Keeps core business columns plus everything not explicitly
        irrelevant; for ``reduction_type='non_param'`` additionally samples
        ``sample_frac`` of the rows within each ``group_col`` group.

        Raises:
            ValueError: if ``integrate_data`` has not been run yet.
        """
        if self.merged_df is None:
            raise ValueError("请先执行数据集成")
        # Default core columns match the actual CSV schema.
        if not core_business_cols:
            core_business_cols = [
                'address', 'area', 'crawl_time', 'direction', 'house_type', 'surrounding', 'title',
                'floor', 'house_area', 'price', 'unit_price', 'people', 'esf_link'
            ]
        # Columns always considered irrelevant, plus any caller-supplied ones.
        default_irrelevant = ['备注', '数据采集人员', '采集设备编号', '临时标记', '无效字段']
        if irrelevant_cols:
            default_irrelevant.extend(irrelevant_cols)
        irrelevant_cols = list(set(default_irrelevant))

        existing_core_cols = [col for col in core_business_cols if col in self.merged_df.columns]
        existing_irrelevant_cols = [col for col in irrelevant_cols if col in self.merged_df.columns]

        # Core columns first, then every remaining non-irrelevant column.
        all_cols = self.merged_df.columns.tolist()
        keep_cols = existing_core_cols + [
            col for col in all_cols
            if col not in existing_core_cols and col not in existing_irrelevant_cols
        ]

        self.processed_df = self.merged_df[keep_cols].copy()
        print(f"维度规约：原始列{len(all_cols)}列 → 保留列{len(keep_cols)}列")
        print(f"直接删除的不相关列：{existing_irrelevant_cols}")

        # Row-count reduction via grouped random sampling.
        if reduction_type == 'non_param':
            if group_col in self.processed_df.columns:
                group_data = self.processed_df[group_col]
                # Duplicate column labels make this selection a DataFrame.
                if isinstance(group_data, pd.DataFrame):
                    group_data = group_data.iloc[:, 0]
                if group_data.nunique() > 1:
                    # Sample only groups larger than 10 rows; keep small groups whole.
                    self.processed_df = self.processed_df.groupby(
                        group_data, group_keys=False
                    ).apply(
                        lambda x: x.sample(frac=sample_frac) if len(x) > 10 else x
                    )
            print(f"无参数数据量规约后形状: {self.processed_df.shape}")
        else:
            # BUGFIX: parametric reduction modes were silently ignored;
            # warn explicitly so callers know no sampling happened.
            print(f"警告：reduction_type='{reduction_type}' 暂未实现，跳过数据量规约")

    def transform_data(self) -> None:
        """Apply field-level transformations to ``self.processed_df``.

        Steps: trim ``address`` to the text before the first space, extract
        numbers from floor/area/price columns, one-hot encode categorical
        columns, and normalise ``crawl_time`` to a 'YYYY-MM-DD' string.

        Raises:
            ValueError: if no data is available to transform.
        """
        if self.processed_df is None:
            # BUGFIX: previously this crashed with AttributeError
            # (None.copy()) when integrate_data had not run either.
            if self.merged_df is None:
                raise ValueError("请先执行数据集成")
            self.processed_df = self.merged_df.copy()

        # 1. address: keep only the text before the first space.
        if 'address' in self.processed_df.columns:
            self.processed_df['address'] = self.processed_df['address'].astype(str).str.split(' ').str[0]
            print("address字段处理完成：保留空格前内容")

        # 2. Extract numeric values from floor / house_area / price / unit_price.
        self._extract_numbers()

        # 3. One-hot encode the categorical columns.
        onehot_cols = ['address', 'area', 'direction', 'house_type', 'surrounding']
        self._onehot_encoding(onehot_cols)

        if 'crawl_time' in self.processed_df.columns:
            # Parse to datetime ('2025-11' auto-completes to '2025-11-01');
            # unparseable values become NaT.
            self.processed_df['crawl_time'] = pd.to_datetime(self.processed_df['crawl_time'], errors='coerce')
            # Keep only the date part as a 'YYYY-MM-DD' string (no time).
            self.processed_df['crawl_time'] = self.processed_df['crawl_time'].dt.date.astype(str)
            print("crawl_time字段处理完成：已格式化为纯年月日（YYYY-MM-DD），无时分秒")
        else:
            print("警告：数据中未找到 'crawl_time' 列，跳过日期处理。")

        print("数据变换完成")

    @staticmethod
    def _first_number(text: str, pattern: str) -> str:
        """Return the first capture of ``pattern`` in ``text``, or '0'
        when nothing matches. ``pattern`` must have one capture group."""
        m = re.search(pattern, text)
        return m.group(1) if m else '0'

    def _extract_numbers(self) -> None:
        """Extract the numeric part of floor / house_area / price /
        unit_price into proper numeric dtypes.

        Each regex now runs once per value via ``_first_number`` (the old
        code evaluated ``re.findall`` twice per cell).
        """
        df = self.processed_df
        # floor: total storeys, e.g. '共33层' -> 33, '底层（共36层）' -> 36.
        if 'floor' in df.columns:
            df['floor'] = df['floor'].astype(str).apply(
                lambda x: self._first_number(x, r'共(\d+)层')
            ).astype(int)
            print("floor字段处理完成：提取总层数数字")

        # house_area: '172.73㎡' -> 172.73
        if 'house_area' in df.columns:
            df['house_area'] = df['house_area'].astype(str).apply(
                lambda x: self._first_number(x, r'(\d+\.?\d*)')
            ).astype(float)
            print("house_area字段处理完成：提取面积数字")

        # price: '520万' -> 520
        if 'price' in df.columns:
            df['price'] = df['price'].astype(str).apply(
                lambda x: self._first_number(x, r'(\d+\.?\d*)')
            ).astype(float)
            print("price字段处理完成：提取价格数字")

        # unit_price: '30104元/㎡' -> 30104
        if 'unit_price' in df.columns:
            df['unit_price'] = df['unit_price'].astype(str).apply(
                lambda x: self._first_number(x, r'(\d+\.?\d*)')
            ).astype(float)
            print("unit_price字段处理完成：提取单价数字")

    def _onehot_encoding(self, cols: List[str]) -> None:
        """One-hot encode the given columns, folding rare categories
        (frequency < 2) into '其他' to avoid dimension explosion."""
        for col in cols:
            if col not in self.processed_df.columns:
                print(f"{col}字段不存在，跳过独热编码")
                continue

            # Record the raw category -> index mapping for traceability.
            unique_cats = self.processed_df[col].unique()
            self.label_mappings[col] = {cat: idx for idx, cat in enumerate(unique_cats)}

            # Keep categories seen at least twice; use a set for O(1)
            # membership tests (was a per-row list scan).
            value_counts = self.processed_df[col].value_counts()
            high_freq_cats = set(value_counts[value_counts >= 2].index)
            self.processed_df[col + '_processed'] = self.processed_df[col].apply(
                lambda x: x if x in high_freq_cats else '其他'
            )

            # One-hot encode the bucketed column.
            onehot_df = pd.get_dummies(self.processed_df[col + '_processed'], prefix=col)
            self.processed_df = pd.concat([self.processed_df, onehot_df], axis=1)

            # Drop the raw column and the intermediate bucketed column.
            self.processed_df.drop([col, col + '_processed'], axis=1, inplace=True)
            print(f"{col}字段独热编码完成：生成{len(onehot_df.columns)}个编码列")

    def _create_engine(self):
        """Build a SQLAlchemy engine from ``self.db_config`` (shared by
        both save methods; previously duplicated)."""
        return create_engine(
            f"mysql+pymysql://{self.db_config['username']}:{self.db_config['password']}@"
            f"{self.db_config['host']}:{self.db_config['port']}/{self.db_config['database']}"
        )

    def save_to_mysql(self, table_name: str = 'processed_house_data', if_exists: str = 'replace') -> None:
        """Write ``self.processed_df`` to MySQL.

        Column names are sanitised first (parentheses removed, spaces
        replaced by '_') to avoid SQL identifier errors.

        Raises:
            ValueError: when there is no processed data to save.
        """
        if self.processed_df is None:
            raise ValueError("无处理后的数据可保存")
        try:
            engine = self._create_engine()
            # Sanitise column names so MySQL accepts them as identifiers.
            self.processed_df.columns = [
                col.replace('(', '').replace(')', '').replace(' ', '_').replace('__', '_')
                for col in self.processed_df.columns
            ]
            self.processed_df.to_sql(table_name, engine, if_exists=if_exists, index=False, chunksize=1000)
            print(f"✅ 处理后的数据已写入MySQL表: {table_name}")
            print(f"📊 写入数据形状: {self.processed_df.shape}")
        except Exception as e:
            print(f"❌ MySQL写入失败: {e}")
            raise

    def save_label_mappings(self, table_name: str = 'house_label_mappings') -> None:
        """Persist ``self.label_mappings`` as a flat (column, category,
        index) table for one-hot-encoding traceability."""
        if not self.label_mappings:
            print("无类别映射可保存")
            return
        try:
            # Flatten {column: {category: index}} into one row per category.
            mapping_data = []
            for col_name, cat_map in self.label_mappings.items():
                for cat, idx in cat_map.items():
                    mapping_data.append({
                        'column_name': col_name,
                        'original_category': cat,
                        'category_index': idx
                    })
            mappings_df = pd.DataFrame(mapping_data)
            engine = self._create_engine()
            mappings_df.to_sql(table_name, engine, if_exists='replace', index=False)
            print(f"✅ 类别映射表已写入MySQL表: {table_name}")
        except Exception as e:
            print(f"❌ 映射表写入失败: {e}")
            raise

    def _handle_missing(self, df: pd.DataFrame, strategy: str) -> pd.DataFrame:
        """Fill (or drop) missing values in ``df``.

        Numeric columns are filled with the mean/median, non-numeric
        columns with the mode (or '未知' when a column has no mode).

        Raises:
            ValueError: for an unknown strategy.
        """
        if strategy == 'drop':
            return df.dropna()
        if strategy in ('mean', 'median'):
            for col in df.select_dtypes(include=[np.number]):
                fill_val = df[col].mean() if strategy == 'mean' else df[col].median()
                df[col] = df[col].fillna(fill_val)
            for col in df.select_dtypes(exclude=[np.number]):
                modes = df[col].mode()
                df[col] = df[col].fillna(modes.iloc[0] if not modes.empty else '未知')
            return df
        raise ValueError(f"不支持的填充策略: {strategy}")

    def _handle_outliers(self, df: pd.DataFrame) -> Tuple[pd.DataFrame, Dict]:
        """Remove rows with IQR outliers in any numeric column.

        A value is an outlier when outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].

        Returns:
            (filtered DataFrame, {column: list of removed values}).
        """
        outlier_info: Dict[str, list] = {}
        for col in df.select_dtypes(include=[np.number]):
            q1, q3 = df[col].quantile(0.25), df[col].quantile(0.75)
            iqr = q3 - q1
            lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
            outliers = df[(df[col] < lower) | (df[col] > upper)][col]
            if not outliers.empty:
                outlier_info[col] = outliers.tolist()
                df = df[(df[col] >= lower) & (df[col] <= upper)]
        return df, outlier_info


def main() -> None:
    """Run the full preprocessing pipeline against the configured CSVs."""
    # -------------------------- Configuration --------------------------
    # SECURITY NOTE: database credentials are hard-coded in source; prefer
    # loading them from environment variables or a config file that is
    # kept out of version control.
    db_config = {
        "host": "115tt3208yh53.vicp.fun",
        "port": 13245,
        "database": "ry-vue",
        "username": "root",
        "password": "wlw565wlw"
    }
    # CSV file paths (on Windows, raw strings r"..." avoid escape issues).
    file_paths = [
        "D:/NiuHaochen.code/DataAnalysisSystem/fangtianxia_redis/xian_house_price_csv/xian_esf_house.csv"
    ]
    # -------------------------------------------------------------------

    processor = DataProcessor(db_config, file_paths)
    try:
        processor.load_data()        # 1. load CSVs (utf-8-sig handles BOM)
        processor.clean_data()       # 2. missing values + outliers
        processor.integrate_data()   # 3. merge the loaded files
        # NOTE: only 'non_param' performs row sampling in the current
        # implementation; 'param_linear' leaves the row count unchanged.
        processor.reduce_data(reduction_type='param_linear')  # 4. reduction
        processor.transform_data()   # 5. field-level transforms
        processor.save_to_mysql()    # 6. persist processed data
        processor.save_label_mappings()  # 7. persist category mappings (optional)
        print("\n🎉 所有预处理流程执行完成！")
    except Exception as e:
        print(f"\n❌ 执行失败: {e}")


if __name__ == "__main__":
    main()