import random
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

from sqlalchemy.orm import Session

from ..config.settings import get_data_generation_config
from ..database.connection import get_db_session
from ..models.hydrometric import HydrometricDataTimeseries, HydrometricDataRegular
from ..utils.logger import get_logger
from ..utils.time_utils import generate_random_time_in_range, format_duration

# Configure the module-level logger
logger = get_logger(__name__)


class DataGenerator:
    """水文流量数据生成器"""

    def __init__(self, batch_size: int = None):
        """初始化数据生成器
        
        Args:
            batch_size: 批量插入大小
        """
        config = get_data_generation_config()
        self.batch_size = batch_size or config['batch_size']
        self.session = None
        self.num_stations = config['default_station_count']
        self.num_records_per_station = config['default_records_per_station']
        self.total_records = self.num_stations * self.num_records_per_station

        # 生成测站ID列表
        self.station_ids = [f"ST{str(i).zfill(6)}" for i in range(1, self.num_stations + 1)]

        # 时间范围：过去一年到现在
        self.end_time = datetime.now()
        self.start_time = self.end_time - timedelta(days=365)

    def generate_flow_rate(self) -> float:
        """生成随机流量数据（模拟真实水文数据特征）"""
        # 基础流量：10-1000 m³/s
        base_flow = random.uniform(10, 1000)

        # 添加季节性变化
        seasonal_factor = 1 + 0.5 * random.random()

        # 添加随机波动
        noise_factor = 1 + 0.2 * (random.random() - 0.5)

        flow_rate = base_flow * seasonal_factor * noise_factor
        return round(flow_rate, 2)

    def generate_timestamp(self) -> datetime:
        """生成随机时间戳"""
        return generate_random_time_in_range(self.start_time, self.end_time)

    def generate_batch_data(self, batch_size: int) -> List[Dict[str, Any]]:
        """生成一批数据"""
        batch_data = []
        for _ in range(batch_size):
            station_id = random.choice(self.station_ids)
            timestamp = self.generate_timestamp()
            flow_rate = self.generate_flow_rate()
            batch_data.append({
                'station_id': station_id,
                'timestamp': timestamp,
                'flow_rate': flow_rate
            })
        return batch_data
    
    def generate_structured_data_optimized(self, num_stations: int, records_per_station: int):
        """优化的数据生成器，逐站生成并插入，避免内存溢出"""
        logger.info(f"开始优化数据生成: {num_stations} 个测站, 每个测站 {records_per_station:,} 条记录")
        
        total_processed = 0
        
        for i in range(num_stations):
            station_id = self.station_ids[i]
            logger.info(f"正在处理测站 {i+1}/{num_stations}: {station_id}")
            
            # 为当前测站生成时间戳序列（均匀分布在一年内）
            time_delta = (self.end_time - self.start_time).total_seconds()
            time_step = time_delta / records_per_station
            
            station_data = []
            for j in range(records_per_station):
                # 生成均匀分布的时间戳，加入随机偏移
                base_timestamp = self.start_time + timedelta(seconds=j * time_step)
                # 添加随机偏移（±30分钟）
                random_offset = timedelta(minutes=random.randint(-30, 30))
                timestamp = base_timestamp + random_offset
                
                flow_rate = self.generate_flow_rate()
                station_data.append({
                    'station_id': station_id,
                    'timestamp': timestamp,
                    'flow_rate': flow_rate
                })
                
                # 批量插入
                if len(station_data) >= self.batch_size:
                    self._insert_batch_optimized(station_data)
                    total_processed += len(station_data)
                    station_data = []
                    
                    if total_processed % (self.batch_size * 20) == 0:
                        logger.info(f"已处理 {total_processed:,} 条记录")
            
            # 插入剩余数据
            if station_data:
                self._insert_batch_optimized(station_data)
                total_processed += len(station_data)
        
        logger.info(f"数据生成完成，总计 {total_processed:,} 条记录")
        return total_processed

    def insert_batch_to_timeseries(self, batch_data: List[Dict[str, Any]], session: Session):
        """批量插入数据到时序表"""
        try:
            session.bulk_insert_mappings(HydrometricDataTimeseries, batch_data)
            session.commit()
        except Exception as e:
            session.rollback()
            logger.error(f"插入时序表数据失败: {e}")
            raise

    def insert_batch_to_regular(self, batch_data: List[Dict[str, Any]], session: Session):
        """批量插入数据到普通表"""
        try:
            session.bulk_insert_mappings(HydrometricDataRegular, batch_data)
            session.commit()
        except Exception as e:
            session.rollback()
            logger.error(f"插入普通表数据失败: {e}")
            raise
    
    def _insert_batch_optimized(self, batch_data: List[Dict[str, Any]]):
        """优化的批量插入方法，同时插入时序表和常规表"""
        with next(get_db_session()) as session:
            try:
                # 同时插入两个表以保持数据一致性
                session.bulk_insert_mappings(HydrometricDataTimeseries, batch_data)
                session.bulk_insert_mappings(HydrometricDataRegular, batch_data)
                session.commit()
            except Exception as e:
                session.rollback()
                logger.error(f"优化批量插入失败: {e}")
                raise

    def clear_existing_data(self):
        """清空现有数据"""
        try:
            logger.info("清空现有数据...")

            with next(get_db_session()) as session:
                # 清空时序表
                timeseries_count = session.query(HydrometricDataTimeseries).count()
                if timeseries_count > 0:
                    session.query(HydrometricDataTimeseries).delete()
                    logger.info(f"清空时序表 {timeseries_count:,} 条记录")

                # 清空普通表
                regular_count = session.query(HydrometricDataRegular).count()
                if regular_count > 0:
                    session.query(HydrometricDataRegular).delete()
                    logger.info(f"清空普通表 {regular_count:,} 条记录")

                session.commit()
                logger.info("数据清空完成")

        except Exception as e:
            logger.error(f"清空数据失败: {e}")
            raise

    def generate_and_insert_data(self, num_stations: int = None, records_per_station: int = None):
        """生成并插入数据到两个表 - 优化版本"""
        num_stations = num_stations or self.num_stations
        records_per_station = records_per_station or self.num_records_per_station
        total_records = num_stations * records_per_station

        logger.info(f"开始优化数据生成: {num_stations} 个测站, 每个测站 {records_per_station:,} 条记录")
        logger.info(f"总记录数: {total_records:,}")
        logger.info(f"批次大小: {self.batch_size:,}")

        start_time = time.time()

        try:
            # 清空现有数据
            self.clear_existing_data()

            # 使用优化的数据生成方法（流式处理，避免内存溢出）
            total_processed = self.generate_structured_data_optimized(num_stations, records_per_station)

            end_time = time.time()
            duration = end_time - start_time

            logger.info(f"\n=== 数据生成完成 ===")
            logger.info(f"实际处理记录数: {total_processed:,}")
            logger.info(f"总耗时: {format_duration(duration)}")
            logger.info(f"平均速度: {total_processed / duration:.0f} 记录/秒")
            logger.info(f"内存使用: 流式处理，避免了 {total_records * 0.1:.1f}MB 内存占用")

        except Exception as e:
            logger.error(f"数据生成失败: {e}")
            raise


def main():
    """CLI entry point: report the configuration, then run a full generation."""
    config = get_data_generation_config()

    banner = [
        "=== HydroPulse 数据生成器 ===",
        f"配置信息:",
        f"  - 测站数量: {config['default_station_count']:,}",
        f"  - 每站记录数: {config['default_records_per_station']:,}",
        f"  - 批次大小: {config['batch_size']:,}",
    ]
    for line in banner:
        logger.info(line)

    generator = DataGenerator()

    try:
        generator.generate_and_insert_data()
        logger.info("\n=== 执行完成 ===")
    except Exception as e:
        logger.error(f"执行失败: {e}")
        raise


# Script entry-point guard: run the generator only when executed directly.
if __name__ == "__main__":
    main()
