"""
督导系统训练数据生成器
生成真实的督导数据用于模型训练
"""

import pandas as pd
import numpy as np
import random
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Tuple
import os
import json
from dataclasses import dataclass
import jieba

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@dataclass
class DataGeneratorConfig:
    """Configuration for the supervision training-data generator."""
    # --- Data scale ---
    num_areas: int = 100  # number of areas to generate
    num_records: int = 10000  # number of supervision records to generate
    time_span_days: int = 730  # time span covered by records, in days (2 years)
    
    # --- Geographic extent (Beijing used as the example center) ---
    center_lat: float = 39.9042
    center_lon: float = 116.4074
    area_radius: float = 0.5  # coverage radius around the center, in degrees
    
    # --- Distribution parameters ---
    seasonal_factor: float = 0.3  # summer/winter problem-rate boost factor
    weekend_factor: float = 0.7  # weekend problem-rate damping factor
    area_risk_variance: float = 0.4  # per-area risk variance (not referenced in this file -- TODO confirm external use)

class SupervisionDataGenerator:
    """Generator for synthetic supervision (inspection) training data.

    Produces an area master table and a supervision-record table whose
    distributions (per-area risk weighting, problem-type mix, severity,
    rectification outcomes, seasonality features) follow the parameters
    supplied in ``DataGeneratorConfig``.
    """
    
    def __init__(self, config: "DataGeneratorConfig" = None):
        """Initialize the generator.

        Args:
            config: Generation parameters; the defaults of
                ``DataGeneratorConfig`` are used when None is given.
                (Annotation is a forward reference so this class can be
                imported independently of the config definition.)
        """
        self.config = config or DataGeneratorConfig()
        self.logger = logging.getLogger(__name__)
        
        # Problem types mapped to description templates.  The Chinese
        # strings below are the generated data itself -- kept verbatim.
        self.problem_templates = {
            "安全隐患": [
                "施工现场安全防护措施不到位，工人未佩戴安全帽",
                "高空作业安全绳索老化，存在重大安全隐患",
                "脚手架搭设不规范，缺少安全网保护",
                "电气线路私拉乱接，存在火灾隐患",
                "危险化学品储存不当，标识不清",
                "消防通道被杂物堵塞，影响疏散",
                "特种设备超期未检，存在安全风险"
            ],
            "环境污染": [
                "工地扬尘治理措施不力，PM2.5超标严重",
                "污水直排，未经处理排入市政管网",
                "噪音污染严重，夜间施工扰民",
                "建筑垃圾乱堆乱放，污染周边环境",
                "有害气体排放超标，影响空气质量",
                "固体废物处置不当，造成土壤污染",
                "油污泄漏未及时清理，污染地下水"
            ],
            "违规施工": [
                "未取得施工许可证擅自开工建设",
                "超出批准范围违法建设，占用绿地",
                "夜间违规施工，违反时间限制规定",
                "未按图纸施工，擅自变更设计方案",
                "违规使用禁用材料和工艺",
                "施工现场围挡不规范，影响市容",
                "占用公共道路施工，未办理相关手续"
            ],
            "质量问题": [
                "混凝土强度不达标，存在结构安全隐患",
                "墙体出现裂缝，施工质量存在问题",
                "防水工程质量不合格，出现渗漏",
                "钢筋保护层厚度不足，影响结构耐久性",
                "装修材料质量不合格，甲醛超标",
                "门窗安装不规范，密封性能差",
                "地基处理不当，可能影响建筑稳定性"
            ],
            "制度执行": [
                "安全生产责任制落实不到位",
                "项目管理制度执行不严格",
                "质量检查制度形同虚设",
                "人员培训制度执行不力",
                "应急预案制度不完善",
                "档案管理制度混乱，资料不全",
                "现场管理制度执行不规范"
            ],
            "人员管理": [
                "特种作业人员无证上岗",
                "施工人员安全培训不到位",
                "管理人员配备不足，现场无人监管",
                "劳务人员实名制管理不规范",
                "人员健康检查制度执行不力",
                "外来人员管理制度不完善",
                "岗位责任制落实不到位"
            ],
            "设备故障": [
                "塔吊设备故障频发，影响施工进度",
                "升降机安全装置失效，存在安全隐患",
                "混凝土搅拌设备老化，影响质量",
                "电梯设备维护不当，运行不正常",
                "通风设备故障，室内空气质量差",
                "监控设备损坏，无法正常监管",
                "消防设备故障，应急能力不足"
            ],
            "其他": [
                "周边居民投诉噪音扰民问题",
                "与相邻项目施工冲突",
                "市政配套设施损坏",
                "交通组织不合理，造成拥堵",
                "临时设施搭建不规范",
                "标识标牌设置不完整",
                "信息公示不及时、不准确"
            ]
        }
        
        # Keywords appended to a description for the two highest
        # severity levels (see generate_supervision_records).
        self.severity_keywords = {
            "紧急": ["重大", "严重", "危险", "紧急", "立即", "重大隐患", "可能导致事故"],
            "高": ["较严重", "明显", "超标", "不合格", "影响安全", "需要整改"],
            "中": ["一般", "轻微", "部分", "局部", "需要注意", "建议改进"],
            "低": ["微小", "轻度", "个别", "提醒", "建议", "完善"]
        }
        
        # Area types: base risk score plus the problem categories most
        # frequently observed in that type of area.
        self.area_types = {
            "工业区": {"base_risk": 0.7, "common_problems": ["安全隐患", "环境污染", "设备故障"]},
            "住宅区": {"base_risk": 0.3, "common_problems": ["违规施工", "质量问题", "环境污染"]},
            "商业区": {"base_risk": 0.4, "common_problems": ["制度执行", "人员管理", "其他"]},
            "混合区": {"base_risk": 0.5, "common_problems": ["安全隐患", "违规施工", "质量问题"]},
            "开发区": {"base_risk": 0.6, "common_problems": ["安全隐患", "环境污染", "违规施工"]}
        }
    
    def generate_areas(self) -> pd.DataFrame:
        """Generate the area master table.

        Returns:
            DataFrame with one row per area (codes ``A0001``…), with
            coordinates sampled around the configured center and a
            discrete risk level derived from the area type's base risk.
        """
        self.logger.info(f"生成 {self.config.num_areas} 个区域数据...")
        
        areas = []
        for i in range(self.config.num_areas):
            area_code = f"A{str(i+1).zfill(4)}"
            
            # Pick a random area type; it drives risk and problem mix.
            area_type = random.choice(list(self.area_types.keys()))
            type_info = self.area_types[area_type]
            
            # Coordinates: normal distribution around the center point
            # (std = radius/3 so ~99.7% of samples fall within radius).
            lat = np.random.normal(self.config.center_lat, self.config.area_radius/3)
            lon = np.random.normal(self.config.center_lon, self.config.area_radius/3)
            
            # Industrial areas draw a smaller population (lognormal mu=7 vs 9).
            population = int(np.random.lognormal(9, 1)) if area_type != "工业区" else int(np.random.lognormal(7, 1))
            area_size = np.random.uniform(0.5, 15.0)
            
            # Map the type's base risk score onto a discrete level.
            base_risk = type_info["base_risk"]
            if base_risk > 0.6:
                risk_level = "高"
            elif base_risk > 0.35:
                risk_level = "中"
            else:
                risk_level = "低"
            
            areas.append({
                'area_code': area_code,
                'area_name': f"{area_type}_{area_code}",
                'area_type': area_type,
                'parent_area_code': None,
                'longitude': round(lon, 6),
                'latitude': round(lat, 6),
                'population': population,
                'area_size': round(area_size, 2),
                'risk_level': risk_level,
                'management_unit': f"管理单位_{(i//10)+1}"
            })
        
        return pd.DataFrame(areas)
    
    def generate_supervision_records(self, areas_df: pd.DataFrame) -> pd.DataFrame:
        """Generate supervision records for the given areas.

        Args:
            areas_df: Area master table from :meth:`generate_areas`.

        Returns:
            DataFrame with ``config.num_records`` rows spanning the last
            ``config.time_span_days`` days, weighted towards higher-risk
            areas.
        """
        self.logger.info(f"生成 {self.config.num_records} 条督导记录...")
        
        records = []
        start_date = datetime.now() - timedelta(days=self.config.time_span_days)
        
        # Weight each area by its base risk so high-risk areas receive
        # proportionally more records.
        area_weights = []
        for _, area in areas_df.iterrows():
            base_weight = self.area_types[area['area_type']]["base_risk"]
            if area['risk_level'] == "高":
                weight = base_weight * 1.5
            elif area['risk_level'] == "中":
                weight = base_weight * 1.0
            else:
                weight = base_weight * 0.6
            area_weights.append(weight)
        
        # Normalize to a probability distribution for np.random.choice.
        area_weights = np.array(area_weights)
        area_weights = area_weights / area_weights.sum()
        
        for i in range(self.config.num_records):
            # Pick an area according to the risk weights.
            area_idx = np.random.choice(len(areas_df), p=area_weights)
            area = areas_df.iloc[area_idx]
            
            # Uniform random date within the configured time span.
            random_days = np.random.randint(0, self.config.time_span_days)
            record_date = start_date + timedelta(days=random_days)
            
            # Seasonal adjustment (summer/winter boost, weekend damping).
            # NOTE(review): seasonal_multiplier is computed but never
            # applied anywhere below -- preserved as-is; confirm whether
            # it was intended to bias severity or record counts.
            month = record_date.month
            seasonal_multiplier = 1.0
            if month in [6, 7, 8, 12, 1, 2]:  # summer and winter months
                seasonal_multiplier = 1 + self.config.seasonal_factor
            
            if record_date.weekday() >= 5:  # weekend
                seasonal_multiplier *= self.config.weekend_factor
            
            # Problem type: 70% chance of one of the area type's common
            # problems, 30% chance of any category.
            area_type = area['area_type']
            common_problems = self.area_types[area_type]["common_problems"]
            
            if random.random() < 0.7:
                problem_type = random.choice(common_problems)
            else:
                problem_type = random.choice(list(self.problem_templates.keys()))
            
            # Base description drawn from the category's templates.
            base_description = random.choice(self.problem_templates[problem_type])
            
            # Severity distribution, skewed by the area's risk level.
            severity_probs = [0.4, 0.35, 0.2, 0.05]  # low, medium, high, urgent
            if area['risk_level'] == "高":
                severity_probs = [0.2, 0.3, 0.35, 0.15]  # high-risk areas skew severe
            elif area['risk_level'] == "低":
                severity_probs = [0.6, 0.3, 0.08, 0.02]  # low-risk areas skew mild
            
            severity_level = np.random.choice(["低", "中", "高", "紧急"], p=severity_probs)
            
            # Severe problems get extra severity wording appended.
            if severity_level in ["高", "紧急"]:
                severity_words = random.choice(self.severity_keywords[severity_level])
                problem_description = f"{base_description}，{severity_words}，需要立即处理"
            else:
                problem_description = base_description
            
            # Supervisor pool of 50 (SUP001..SUP050).
            supervisor_id = f"SUP{random.randint(1, 50):03d}"
            
            # Rectification deadline 3-30 days after the record date.
            rectification_deadline = record_date + timedelta(days=random.randint(3, 30))
            
            # Rectification status depends on elapsed time and severity.
            # BUGFIX: the original called random.choice(seq, weights=...),
            # which raises TypeError -- random.choice has no weights
            # parameter.  Weighted picks require random.choices(), which
            # returns a one-element list.
            days_passed = (datetime.now() - record_date).days
            if days_passed > 30:
                if severity_level in ["高", "紧急"]:
                    rectification_status = random.choices(
                        ["已完成", "延期"], weights=[0.7, 0.3])[0]
                else:
                    rectification_status = random.choices(
                        ["已完成", "延期"], weights=[0.9, 0.1])[0]
            else:
                rectification_status = random.choices(
                    ["进行中", "已完成"], weights=[0.6, 0.4])[0]
            
            # A follow-up date exists only once rectification is complete.
            if rectification_status == "已完成":
                follow_up_date = record_date + timedelta(days=random.randint(1, 20))
            else:
                follow_up_date = None
            
            # Small positional jitter around the area centroid.
            lon_offset = np.random.normal(0, 0.005)
            lat_offset = np.random.normal(0, 0.005)
            
            records.append({
                'id': i + 1,
                'supervision_date': record_date.date(),
                'area_code': area['area_code'],
                'area_name': area['area_name'],
                'longitude': round(area['longitude'] + lon_offset, 6),
                'latitude': round(area['latitude'] + lat_offset, 6),
                'problem_description': problem_description,
                'problem_type': problem_type,
                'severity_level': severity_level,
                'supervisor_id': supervisor_id,
                'rectification_deadline': rectification_deadline.date(),
                'rectification_status': rectification_status,
                'follow_up_date': follow_up_date.date() if follow_up_date else None,
                'created_at': record_date,
                'updated_at': record_date
            })
        
        return pd.DataFrame(records)
    
    def add_temporal_patterns(self, records_df: pd.DataFrame) -> pd.DataFrame:
        """Sort records chronologically and add calendar feature columns.

        Args:
            records_df: Output of :meth:`generate_supervision_records`.

        Returns:
            The same records, sorted by date, with ``supervision_date``
            converted to datetime64 and ``year``/``month``/``day_of_week``/
            ``quarter`` feature columns added.
        """
        self.logger.info("添加时间模式...")
        
        # Chronological order; drop the old index.
        records_df = records_df.sort_values('supervision_date').reset_index(drop=True)
        
        # Derive calendar features from the (now datetime64) date column.
        records_df['supervision_date'] = pd.to_datetime(records_df['supervision_date'])
        records_df['year'] = records_df['supervision_date'].dt.year
        records_df['month'] = records_df['supervision_date'].dt.month
        records_df['day_of_week'] = records_df['supervision_date'].dt.dayofweek
        records_df['quarter'] = records_df['supervision_date'].dt.quarter
        
        return records_df
    
    def generate_dataset(self) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Generate, enrich and validate the complete dataset.

        Returns:
            Tuple of ``(areas_df, records_df)``.

        Raises:
            AssertionError: if the generated data fails a quality check.
        """
        self.logger.info("开始生成督导训练数据集...")
        
        areas_df = self.generate_areas()
        records_df = self.generate_supervision_records(areas_df)
        records_df = self.add_temporal_patterns(records_df)
        
        # Quality checks run after temporal enrichment so that the
        # date column is already datetime64 (needed for comparisons).
        self._validate_data(areas_df, records_df)
        
        self.logger.info("数据集生成完成！")
        self.logger.info(f"区域数量: {len(areas_df)}")
        self.logger.info(f"督导记录数量: {len(records_df)}")
        
        return areas_df, records_df
    
    def _validate_data(self, areas_df: pd.DataFrame, records_df: pd.DataFrame):
        """Validate data quality, raising AssertionError on any violation.

        Explicit ``raise`` statements are used instead of ``assert`` so
        the checks are not stripped when running under ``python -O``;
        the exception type callers see is unchanged.
        """
        # Area codes must be unique.
        if areas_df['area_code'].nunique() != len(areas_df):
            raise AssertionError("区域代码存在重复")
        
        # Every record must reference an existing area.
        if not records_df['area_code'].isin(areas_df['area_code']).all():
            raise AssertionError("存在无效的区域代码")
        
        # Critical fields must contain no nulls.
        critical_fields = ['area_code', 'problem_type', 'severity_level', 'supervision_date']
        for field in critical_fields:
            if records_df[field].isnull().any():
                raise AssertionError(f"字段 {field} 存在空值")
        
        # No record may be dated in the future.
        if not (records_df['supervision_date'] <= datetime.now()).all():
            raise AssertionError("存在未来日期")
        
        self.logger.info("数据质量检查通过")
    
    def save_dataset(self, areas_df: pd.DataFrame, records_df: pd.DataFrame, 
                    output_dir: str = "data"):
        """Persist the dataset as CSV files plus a JSON stats summary.

        Args:
            areas_df: Area master table.
            records_df: Supervision records (after temporal enrichment).
            output_dir: Directory to write into; created if missing.
        """
        os.makedirs(output_dir, exist_ok=True)
        
        areas_path = os.path.join(output_dir, "areas.csv")
        records_path = os.path.join(output_dir, "supervision_records.csv")
        
        areas_df.to_csv(areas_path, index=False, encoding='utf-8')
        records_df.to_csv(records_path, index=False, encoding='utf-8')
        
        # Summary statistics alongside the data for reproducibility.
        stats = {
            "generation_time": datetime.now().isoformat(),
            "config": {
                "num_areas": self.config.num_areas,
                "num_records": self.config.num_records,
                "time_span_days": self.config.time_span_days
            },
            "statistics": {
                "areas_count": len(areas_df),
                "records_count": len(records_df),
                "date_range": {
                    "start": records_df['supervision_date'].min().isoformat(),
                    "end": records_df['supervision_date'].max().isoformat()
                },
                "problem_type_distribution": records_df['problem_type'].value_counts().to_dict(),
                "severity_distribution": records_df['severity_level'].value_counts().to_dict(),
                "area_type_distribution": areas_df['area_type'].value_counts().to_dict()
            }
        }
        
        stats_path = os.path.join(output_dir, "dataset_stats.json")
        with open(stats_path, 'w', encoding='utf-8') as f:
            # default=str stringifies non-JSON values (numpy ints, dates).
            json.dump(stats, f, ensure_ascii=False, indent=2, default=str)
        
        self.logger.info(f"数据集已保存到: {output_dir}")
        self.logger.info(f"  区域数据: {areas_path}")
        self.logger.info(f"  督导记录: {records_path}")
        self.logger.info(f"  统计信息: {stats_path}")

def create_database_sql(areas_df: pd.DataFrame, records_df: pd.DataFrame, 
                       output_file: str = "data/create_database.sql"):
    """Generate a SQL script that creates and populates the database.

    Writes DDL for the two tables, TRUNCATE statements, and one INSERT
    per row of both DataFrames to *output_file*.

    Args:
        areas_df: Area master table (schema of ``generate_areas``).
        records_df: Supervision records (schema of
            ``generate_supervision_records``).
        output_file: Target path; parent directory is created if missing.
    """
    # Resolve the module logger locally so this function has no hidden
    # module-level dependency (same logger object either way).
    log = logging.getLogger(__name__)
    log.info("生成数据库SQL脚本...")
    
    def q(value) -> str:
        """Render *value* as a single-quoted SQL string literal.

        BUGFIX: the original escaped single quotes only in
        problem_description; any other field containing a quote would
        produce broken (and injectable) SQL.  All string fields now go
        through this one escaper.
        """
        return "'" + str(value).replace("'", "''") + "'"
    
    sql_commands = []
    
    # Table DDL (text kept verbatim from the original script).
    sql_commands.append("""
-- 创建区域信息表
CREATE TABLE IF NOT EXISTS area_info (
    area_code VARCHAR(50) PRIMARY KEY,
    area_name VARCHAR(100),
    area_type VARCHAR(50),
    parent_area_code VARCHAR(50),
    longitude DECIMAL(10,6),
    latitude DECIMAL(10,6),
    population INTEGER,
    area_size DECIMAL(10,2),
    risk_level VARCHAR(20),
    management_unit VARCHAR(100)
);
""")
    
    sql_commands.append("""
-- 创建督导记录表
CREATE TABLE IF NOT EXISTS supervision_records (
    id SERIAL PRIMARY KEY,
    supervision_date DATE,
    area_code VARCHAR(50),
    area_name VARCHAR(100),
    longitude DECIMAL(10,6),
    latitude DECIMAL(10,6),
    problem_description TEXT,
    problem_type VARCHAR(50),
    severity_level VARCHAR(20),
    supervisor_id VARCHAR(50),
    rectification_deadline DATE,
    rectification_status VARCHAR(50),
    follow_up_date DATE,
    created_at TIMESTAMP,
    updated_at TIMESTAMP,
    FOREIGN KEY (area_code) REFERENCES area_info(area_code)
);
""")
    
    # Clear existing data (records first is handled by CASCADE anyway).
    sql_commands.append("TRUNCATE TABLE supervision_records CASCADE;")
    sql_commands.append("TRUNCATE TABLE area_info CASCADE;")
    
    # Area rows.
    sql_commands.append("\n-- 插入区域数据")
    for _, row in areas_df.iterrows():
        sql = f"""INSERT INTO area_info VALUES (
    {q(row['area_code'])}, {q(row['area_name'])}, {q(row['area_type'])}, 
    NULL, {row['longitude']}, {row['latitude']}, {row['population']}, 
    {row['area_size']}, {q(row['risk_level'])}, {q(row['management_unit'])}
);"""
        sql_commands.append(sql)
    
    # Supervision-record rows.
    sql_commands.append("\n-- 插入督导记录数据")
    for _, row in records_df.iterrows():
        # follow_up_date may legitimately be missing -> SQL NULL.
        follow_up = q(row['follow_up_date']) if pd.notna(row['follow_up_date']) else "NULL"
        
        sql = f"""INSERT INTO supervision_records (
    supervision_date, area_code, area_name, longitude, latitude,
    problem_description, problem_type, severity_level, supervisor_id,
    rectification_deadline, rectification_status, follow_up_date,
    created_at, updated_at
) VALUES (
    {q(row['supervision_date'])}, {q(row['area_code'])}, {q(row['area_name'])},
    {row['longitude']}, {row['latitude']}, {q(row['problem_description'])},
    {q(row['problem_type'])}, {q(row['severity_level'])}, {q(row['supervisor_id'])},
    {q(row['rectification_deadline'])}, {q(row['rectification_status'])}, {follow_up},
    {q(row['created_at'])}, {q(row['updated_at'])}
);"""
        sql_commands.append(sql)
    
    # Write the whole script in one pass.
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write('\n'.join(sql_commands))
    
    log.info(f"数据库SQL脚本已生成: {output_file}")

def main():
    """Entry point: generate, persist, and report the supervision dataset."""
    
    logger.info("🚀 督导系统训练数据生成器")
    logger.info("=" * 50)
    
    # Build the generator with the production-scale configuration.
    generator = SupervisionDataGenerator(
        DataGeneratorConfig(
            num_areas=150,      # 150 areas
            num_records=20000,  # 20k supervision records
            time_span_days=900  # ~2.5 years of history
        )
    )
    
    # Generate, then write CSV/JSON outputs and the SQL script.
    areas_df, records_df = generator.generate_dataset()
    generator.save_dataset(areas_df, records_df)
    create_database_sql(areas_df, records_df)
    
    # Headline statistics.
    logger.info("\n" + "=" * 50)
    logger.info("📊 数据集统计信息:")
    logger.info(f"区域总数: {len(areas_df)}")
    logger.info(f"督导记录总数: {len(records_df)}")
    logger.info(f"时间跨度: {records_df['supervision_date'].min()} 到 {records_df['supervision_date'].max()}")
    
    # Category distributions, each as "label: count (percent)".
    distributions = (
        ("\n问题类型分布:", records_df['problem_type'], len(records_df)),
        ("\n严重程度分布:", records_df['severity_level'], len(records_df)),
        ("\n区域类型分布:", areas_df['area_type'], len(areas_df)),
    )
    for header, column, total in distributions:
        logger.info(header)
        for label, count in column.value_counts().items():
            logger.info(f"  {label}: {count} ({count/total*100:.1f}%)")
    
    logger.info("\n✅ 数据生成完成！")
    logger.info("📁 文件位置:")
    for artifact in ("data/areas.csv",
                     "data/supervision_records.csv",
                     "data/dataset_stats.json",
                     "data/create_database.sql"):
        logger.info(f"  - {artifact}")
    logger.info("\n🚀 现在可以运行以下命令开始训练:")
    logger.info("  python train_models.py --models all")

# Script entry point: build and persist the full training dataset.
if __name__ == "__main__":
    main() 