"""
数据加载和预处理模块
负责从数据库加载督导数据并进行预处理
"""

import logging
import os
import re
from datetime import datetime, timedelta
from typing import Dict, List, Tuple, Optional

import geopandas as gpd
import jieba
import numpy as np
import pandas as pd
from shapely.geometry import Point
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sqlalchemy import create_engine, text

from config.model_config import config

class DataLoader:
    """Loads supervision data.

    CSV exports under ``config.data.data_dir`` take precedence; the
    configured PostgreSQL database is used as a fallback when no CSV
    data can be loaded.
    """

    def __init__(self):
        # Create the logger before the engine so that a failure while
        # building the connection does not leave the instance without a
        # logger for error reporting.
        self.logger = logging.getLogger(__name__)
        self.engine = self._create_db_connection()

    def _create_db_connection(self):
        """Create a SQLAlchemy engine for the configured PostgreSQL database."""
        db_url = (
            f"postgresql://{config.data.db_user}:{config.data.db_password}"
            f"@{config.data.db_host}:{config.data.db_port}/{config.data.db_name}"
        )
        return create_engine(db_url)

    def load_supervision_records(self, start_date: str = None, end_date: str = None) -> pd.DataFrame:
        """Load supervision records, optionally restricted to a date range.

        Args:
            start_date: Inclusive lower bound (``YYYY-MM-DD``) or None.
            end_date: Inclusive upper bound (``YYYY-MM-DD``) or None.

        Returns:
            A DataFrame sorted by supervision date descending; empty when
            neither the CSV files nor the database yield any data.
        """
        # Known per-domain inspection exports. Each is normalized onto the
        # common supervision-record schema via _get_column_mapping.
        csv_files = [
            "supervision_records.csv",
            "workplace_safety_inspections.csv",
            "construction_safety_inspections.csv",
            "environmental_inspections.csv",
            "food_safety_inspections.csv",
            "fire_safety_inspections.csv",
        ]

        all_data = []

        for csv_file in csv_files:
            csv_path = os.path.join(config.data.data_dir, csv_file)
            if not os.path.exists(csv_path):
                continue
            try:
                df = pd.read_csv(csv_path, encoding='utf-8')

                # Rename the domain-specific columns onto the shared schema.
                column_mapping = self._get_column_mapping(csv_file)
                if column_mapping:
                    df = df.rename(columns=column_mapping)

                # Remember which export each record came from.
                df['data_source'] = csv_file.replace('.csv', '')

                all_data.append(df)
                self.logger.info(f"从 {csv_file} 加载了 {len(df)} 条记录")

            except Exception as e:
                self.logger.warning(f"从 {csv_file} 加载数据失败: {e}")

        if all_data:
            df = pd.concat(all_data, ignore_index=True, sort=False)

            # Normalize whichever date column is present first into
            # 'supervision_date'; unparseable values become NaT.
            for date_col in ('supervision_date', 'inspection_date'):
                if date_col in df.columns:
                    df['supervision_date'] = pd.to_datetime(df[date_col], errors='coerce')
                    break

            # Filter and sort only when the date column actually exists;
            # the previous version sorted unconditionally and could raise
            # a KeyError when no source provided a date column.
            if 'supervision_date' in df.columns:
                if start_date:
                    df = df[df['supervision_date'] >= start_date]
                if end_date:
                    df = df[df['supervision_date'] <= end_date]
                df = df.sort_values('supervision_date', ascending=False)

            self.logger.info(f"总共加载了 {len(df)} 条督导记录")
            return df

        # Database fallback. Date filters are passed as bound parameters
        # rather than interpolated into the SQL string, which prevents
        # SQL injection through start_date/end_date.
        query = """
        SELECT 
            id,
            supervision_date,
            area_code,
            area_name,
            longitude,
            latitude,
            problem_description,
            problem_type,
            severity_level,
            supervisor_id,
            rectification_deadline,
            rectification_status,
            follow_up_date,
            created_at,
            updated_at
        FROM supervision_records
        """

        conditions = []
        params = {}
        if start_date:
            conditions.append("supervision_date >= :start_date")
            params['start_date'] = start_date
        if end_date:
            conditions.append("supervision_date <= :end_date")
            params['end_date'] = end_date

        if conditions:
            query += " WHERE " + " AND ".join(conditions)

        query += " ORDER BY supervision_date DESC"

        try:
            df = pd.read_sql(text(query), self.engine, params=params)
            self.logger.info(f"从数据库加载了 {len(df)} 条督导记录")
            return df
        except Exception as e:
            self.logger.error(f"加载督导记录失败: {e}")
            return pd.DataFrame()

    def load_area_info(self) -> pd.DataFrame:
        """Load area metadata (CSV first, database fallback).

        Returns:
            A DataFrame of area records; empty when both sources fail.
        """
        csv_path = os.path.join(config.data.data_dir, "areas.csv")
        if os.path.exists(csv_path):
            try:
                df = pd.read_csv(csv_path)
                self.logger.info(f"从CSV文件加载了 {len(df)} 个区域信息")
                return df
            except Exception as e:
                self.logger.warning(f"从CSV文件加载失败: {e}，尝试从数据库加载")

        # Fallback: read the whole area table from the database.
        query = """
        SELECT 
            area_code,
            area_name,
            area_type,
            parent_area_code,
            longitude,
            latitude,
            population,
            area_size,
            risk_level,
            management_unit
        FROM area_info
        """

        try:
            df = pd.read_sql(query, self.engine)
            self.logger.info(f"从数据库加载了 {len(df)} 个区域信息")
            return df
        except Exception as e:
            self.logger.error(f"加载区域信息失败: {e}")
            return pd.DataFrame()

    def load_historical_problems(self, days_back: int = 365) -> pd.DataFrame:
        """Load monthly problem statistics per area for the last *days_back* days.

        Args:
            days_back: How far back (in days) to include records.

        Returns:
            A DataFrame grouped by area / type / severity / month; empty
            on database failure.
        """
        start_date = datetime.now() - timedelta(days=days_back)

        # The cutoff is bound as a parameter instead of being formatted
        # into the SQL text (prevents injection, lets the driver quote).
        query = """
        SELECT 
            area_code,
            problem_type,
            severity_level,
            COUNT(*) as problem_count,
            AVG(CASE WHEN rectification_status = '已完成' THEN 1 ELSE 0 END) as rectification_rate,
            DATE_TRUNC('month', supervision_date) as month
        FROM supervision_records
        WHERE supervision_date >= :start_date
        GROUP BY area_code, problem_type, severity_level, DATE_TRUNC('month', supervision_date)
        ORDER BY month DESC
        """

        try:
            df = pd.read_sql(
                text(query),
                self.engine,
                params={'start_date': start_date.strftime('%Y-%m-%d')},
            )
            self.logger.info(f"加载了 {len(df)} 条历史问题统计")
            return df
        except Exception as e:
            self.logger.error(f"加载历史问题数据失败: {e}")
            return pd.DataFrame()

    def _get_column_mapping(self, filename: str) -> dict:
        """Return the source-column -> common-schema mapping for *filename*.

        Returns an empty dict for files that already use the common schema
        (e.g. supervision_records.csv) or are unknown.
        """
        mappings = {
            "workplace_safety_inspections.csv": {
                "inspection_id": "id",
                "inspection_date": "supervision_date",
                "company_name": "area_name",
                "violation_type": "problem_description",
                "compliance_status": "problem_type",
                "severity_level": "severity_level",
                "inspector_id": "supervisor_id",
                "rectification_deadline": "rectification_deadline",
                "created_at": "created_at"
            },
            "construction_safety_inspections.csv": {
                "inspection_id": "id",
                "inspection_date": "supervision_date",
                "project_name": "area_name",
                "violation_description": "problem_description",
                "risk_level": "severity_level",
                "inspector_name": "supervisor_id",
                "rectification_required": "rectification_status",
                "created_time": "created_at",
                "longitude": "longitude",
                "latitude": "latitude"
            },
            "environmental_inspections.csv": {
                "inspection_id": "id",
                "inspection_date": "supervision_date",
                "enterprise_name": "area_name",
                "pollution_type": "problem_description",
                "compliance_status": "problem_type",
                "inspector_team": "supervisor_id",
                "next_inspection_date": "rectification_deadline",
                "report_time": "created_at",
                "longitude": "longitude",
                "latitude": "latitude"
            },
            "food_safety_inspections.csv": {
                "inspection_id": "id",
                "inspection_date": "supervision_date",
                "business_name": "area_name",
                "violation_type": "problem_description",
                "inspection_result": "problem_type",
                "risk_level": "severity_level",
                "inspector_id": "supervisor_id",
                "rectification_required": "rectification_status",
                "inspection_time": "created_at"
            },
            "fire_safety_inspections.csv": {
                "inspection_id": "id",
                "inspection_date": "supervision_date",
                "facility_name": "area_name",
                "fire_hazard": "problem_description",
                "safety_status": "problem_type",
                "severity_level": "severity_level",
                "fire_inspector": "supervisor_id",
                "rectification_deadline": "rectification_deadline",
                "inspection_timestamp": "created_at"
            }
        }

        return mappings.get(filename, {})

class DataPreprocessor:
    """Turns raw supervision records into model-ready inputs.

    Provides one ``preprocess_for_*`` entry point per downstream model
    (risk prediction, problem classification, heatmap prediction) plus
    the private feature-engineering helpers they share.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # Kept for backward compatibility with external users; numeric
        # columns now each get their own scaler (see _create_area_features).
        self.scaler = StandardScaler()
        self.label_encoders = {}   # one LabelEncoder per categorical column
        self.column_scalers = {}   # one StandardScaler per numeric column

    def preprocess_for_risk_prediction(self, supervision_df: pd.DataFrame,
                                     area_df: pd.DataFrame,
                                     historical_df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Build the (features, targets) pair for the risk-warning model.

        Args:
            supervision_df: Raw supervision records (needs 'area_code',
                'supervision_date', 'id', 'severity_level', 'problem_type',
                'longitude', 'latitude').
            area_df: Area metadata keyed by 'area_code'.
            historical_df: Monthly problem statistics keyed by 'area_code'.

        Returns:
            (features, targets): features merged per area, targets with a
            30-day-ahead problem count and a binary risk label.
        """
        # 1. Per-area, per-month spatio-temporal aggregates.
        grid_features = self._create_spatial_temporal_features(supervision_df)

        # 2. Long-run statistics per area.
        historical_features = self._create_historical_features(historical_df)

        # 3. Static area attributes (scaled / label-encoded).
        area_features = self._create_area_features(area_df)

        # 4. Merge everything on area_code (left joins keep all grid rows).
        features = pd.merge(grid_features, historical_features, on='area_code', how='left')
        features = pd.merge(features, area_features, on='area_code', how='left')

        # 5. Target: number of problems in the following 30 days.
        targets = self._create_risk_targets(supervision_df)

        # 6. Impute, then winsorize numeric outliers.
        features = self._clean_and_normalize_features(features)

        self.logger.info(f"风险预警特征维度: {features.shape}")
        return features, targets

    def preprocess_for_problem_classification(self, supervision_df: pd.DataFrame) -> Tuple[List[str], List[str], List[str]]:
        """Build (texts, problem_types, severity_levels) for the text classifier.

        Rows whose cleaned text is 10 characters or shorter are dropped;
        missing labels default to '其他' / '中'.
        """
        texts = []
        problem_types = []
        severity_levels = []

        for _, row in supervision_df.iterrows():
            # Concatenate the configured text fields that are present.
            text_parts = []
            for field in config.problem_classification.text_fields:
                if field in row and pd.notna(row[field]):
                    text_parts.append(str(row[field]))

            if not text_parts:
                continue

            cleaned_text = self._clean_text(" ".join(text_parts))

            if len(cleaned_text) > 10:  # drop texts too short to classify
                texts.append(cleaned_text)
                problem_types.append(row.get('problem_type', '其他'))
                severity_levels.append(row.get('severity_level', '中'))

        self.logger.info(f"问题分类数据量: {len(texts)}")
        return texts, problem_types, severity_levels

    def preprocess_for_heatmap_prediction(self, supervision_df: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
        """Build sliding-window sequence data (X, y) for heatmap prediction."""
        # 1. Discretize space (lon/lat grid) and time (daily bins).
        spatial_grid, temporal_grid = self._create_spatiotemporal_grid(supervision_df)

        # 2. Per-cell daily problem counts, shape (time, lat, lon).
        problem_counts = self._count_problems_in_grid(supervision_df, spatial_grid, temporal_grid)

        # 3. Windowed sequences: X holds the trailing window, y the next day.
        X, y = self._create_sequence_data(problem_counts)

        self.logger.info(f"热力图预测数据形状: X={X.shape}, y={y.shape}")
        return X, y

    def _create_spatial_temporal_features(self, df: pd.DataFrame) -> pd.DataFrame:
        """Aggregate per (area_code, month): counts, severity score, extent."""
        # Work on a copy so the caller's frame is not mutated in place.
        df = df.assign(supervision_date=pd.to_datetime(df['supervision_date']))
        df['month'] = df['supervision_date'].dt.to_period('M')

        features = df.groupby(['area_code', 'month']).agg({
            'id': 'count',  # total problems in the bucket
            # Severity score: each '高' counts 1, each '紧急' counts 2.
            'severity_level': lambda x: (x == '高').sum() + (x == '紧急').sum() * 2,
            'problem_type': 'nunique',  # distinct problem types
            'longitude': 'mean',
            'latitude': 'mean'
        }).reset_index()

        features.columns = ['area_code', 'month', 'problem_count', 'severity_score', 'problem_type_count', 'longitude', 'latitude']

        return features

    def _create_historical_features(self, df: pd.DataFrame) -> pd.DataFrame:
        """Per-area long-run statistics: mean/std/max counts and fix rate."""
        features = df.groupby('area_code').agg({
            'problem_count': ['mean', 'std', 'max'],
            'rectification_rate': 'mean'
        }).reset_index()

        features.columns = ['area_code', 'avg_problem_count', 'std_problem_count',
                           'max_problem_count', 'avg_rectification_rate']

        return features

    def _create_area_features(self, df: pd.DataFrame) -> pd.DataFrame:
        """Scale numeric area attributes and label-encode categorical ones."""
        numeric_columns = ['population', 'area_size']
        df_processed = df.copy()

        for col in numeric_columns:
            if col in df_processed.columns:
                # One dedicated scaler per column. The previous version
                # refit a single shared StandardScaler on each column,
                # leaving it fit only on the last one processed.
                scaler = self.column_scalers.setdefault(col, StandardScaler())
                df_processed[col] = scaler.fit_transform(df_processed[[col]])

        categorical_columns = ['area_type', 'risk_level', 'management_unit']
        for col in categorical_columns:
            if col in df_processed.columns:
                if col not in self.label_encoders:
                    self.label_encoders[col] = LabelEncoder()
                # Missing categories are mapped to the literal '未知' bucket.
                df_processed[col] = self.label_encoders[col].fit_transform(df_processed[col].fillna('未知'))

        return df_processed

    def _create_risk_targets(self, df: pd.DataFrame) -> pd.DataFrame:
        """For each record, count the area's problems in the next 30 days.

        Returns a DataFrame with columns area_code / date / future_problems /
        risk_level (1 when more than 5 future problems, else 0).
        """
        # Copy via assign so the caller's frame keeps its original dtypes.
        df = df.assign(supervision_date=pd.to_datetime(df['supervision_date']))

        targets = []
        for area_code in df['area_code'].unique():
            area_data = df[df['area_code'] == area_code].sort_values('supervision_date')

            # Skip the trailing 30 records, whose 30-day windows would be
            # truncated by the end of the data.
            for i in range(len(area_data) - 30):
                current_date = area_data.iloc[i]['supervision_date']
                future_date = current_date + timedelta(days=30)

                future_problems = len(area_data[
                    (area_data['supervision_date'] > current_date) &
                    (area_data['supervision_date'] <= future_date)
                ])

                targets.append({
                    'area_code': area_code,
                    'date': current_date,
                    'future_problems': future_problems,
                    'risk_level': 1 if future_problems > 5 else 0  # binary label
                })

        return pd.DataFrame(targets)

    def _clean_text(self, text: str) -> str:
        """Strip non-CJK/non-word characters, segment with jieba, drop 1-char tokens."""
        text = re.sub(r'[^\u4e00-\u9fff\w\s]', '', text)
        words = jieba.cut(text)
        cleaned_words = [word.strip() for word in words if len(word.strip()) > 1]
        return ' '.join(cleaned_words)

    def _create_spatiotemporal_grid(self, df: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:
        """Return ((lon_grid, lat_grid), daily time grid) covering the data extent."""
        lon_min, lon_max = df['longitude'].min(), df['longitude'].max()
        lat_min, lat_max = df['latitude'].min(), df['latitude'].max()

        resolution = config.heatmap_prediction.grid_resolution
        lon_grid = np.arange(lon_min, lon_max, resolution)
        lat_grid = np.arange(lat_min, lat_max, resolution)

        # Use a local datetime series instead of mutating the caller's frame.
        dates = pd.to_datetime(df['supervision_date'])
        time_grid = pd.date_range(dates.min(), dates.max(), freq='D')

        return (lon_grid, lat_grid), time_grid

    def _count_problems_in_grid(self, df: pd.DataFrame, spatial_grid: Tuple, temporal_grid: np.ndarray) -> np.ndarray:
        """Count problems per (day, lat cell, lon cell); out-of-grid rows are dropped."""
        lon_grid, lat_grid = spatial_grid
        resolution = config.heatmap_prediction.grid_resolution

        # 3D tensor indexed as (time, latitude, longitude).
        counts = np.zeros((len(temporal_grid), len(lat_grid), len(lon_grid)))

        # Local series avoids mutating the caller's DataFrame in place.
        dates = pd.to_datetime(df['supervision_date'])

        for idx, row in df.iterrows():
            # Map coordinates onto grid cell indices.
            lon_idx = int((row['longitude'] - lon_grid[0]) / resolution)
            lat_idx = int((row['latitude'] - lat_grid[0]) / resolution)

            # Map the (normalized) day onto the time grid; -1 = not covered.
            day = dates.loc[idx].normalize()
            time_idx = temporal_grid.get_loc(day) if day in temporal_grid else -1

            if 0 <= lon_idx < len(lon_grid) and 0 <= lat_idx < len(lat_grid) and time_idx >= 0:
                counts[time_idx, lat_idx, lon_idx] += 1

        return counts

    def _create_sequence_data(self, problem_counts: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Slide a window over daily grids: X = trailing window, y = next day."""
        window_size = config.heatmap_prediction.temporal_window_hours // 24  # hours -> days

        X, y = [], []
        for i in range(window_size, len(problem_counts)):
            X.append(problem_counts[i - window_size:i])
            y.append(problem_counts[i])

        return np.array(X), np.array(y)

    def _clean_and_normalize_features(self, df: pd.DataFrame) -> pd.DataFrame:
        """Impute missing numerics with column means, then clip IQR outliers.

        Returns a new DataFrame; non-numeric columns pass through untouched.
        """
        # numeric_only=True: df.mean() over mixed dtypes raises a TypeError
        # on modern pandas, and imputing strings with means is meaningless.
        df = df.fillna(df.mean(numeric_only=True))

        # Winsorize each numeric column to [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
        for col in df.select_dtypes(include=[np.number]).columns:
            q1 = df[col].quantile(0.25)
            q3 = df[col].quantile(0.75)
            iqr = q3 - q1
            df[col] = df[col].clip(q1 - 1.5 * iqr, q3 + 1.5 * iqr)

        return df