import asyncio
import aiomysql
import yaml
import time
import os
import random
from datetime import datetime, timedelta
from decimal import Decimal
from loguru import logger
from typing import List, Dict, Any, Optional, Tuple
import polars as pl
from .notify import send_error_alert


class BasicOrderHandler:
    """
    Asynchronous data synchronisation handler.

    Uses aiomysql connection pools for high-performance async database
    access, with explicit per-batch transactions and bounded batch
    concurrency.
    """

    def __init__(self, max_connections: int = 10):
        """
        Args:
            max_connections: upper bound for the source/target pool sizes.
        """
        self.max_connections = max_connections
        self.source_pool = None
        self.target_pool = None
        self.mall_pool = None  # optional; created only when 'mall_db' is configured
        self.config = None
        # NOTE(review): this semaphore is currently unused — sync_task_async
        # creates its own.  Kept so the attribute stays available to callers.
        self._semaphore = asyncio.Semaphore(max_connections)

    async def __aenter__(self):
        await self.initialize_pools()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close_pools()

    async def initialize_pools(self):
        """
        Load <project_root>/config/config.yaml and create the connection pools.

        The source and target pools are always created; the mall pool is
        created only when a 'mall_db' section exists in the configuration.
        """
        # Config file lives one directory above this module's package.
        config_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'config', 'config.yaml'
        )
        with open(config_path, encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        self.source_pool = await self._create_pool(
            'source_db', minsize=5, maxsize=self.max_connections)
        self.target_pool = await self._create_pool(
            'target_db', minsize=5, maxsize=self.max_connections)
        if 'mall_db' in self.config:
            self.mall_pool = await self._create_pool('mall_db', minsize=3, maxsize=5)

    async def _create_pool(self, section: str, minsize: int, maxsize: int):
        """Create one aiomysql pool from the named config section."""
        cfg = self.config[section]
        return await aiomysql.create_pool(
            host=cfg['host'],
            port=cfg['port'],
            user=cfg['user'],
            password=cfg['password'],
            db=cfg['database'],
            charset=cfg.get('charset', 'utf8mb4'),
            minsize=minsize,
            maxsize=maxsize,
            autocommit=False  # transactions are committed/rolled back explicitly
        )

    async def close_pools(self):
        """Close every pool that was opened and wait for full shutdown."""
        for pool in (self.source_pool, self.target_pool, self.mall_pool):
            if pool:
                pool.close()
                await pool.wait_closed()

    def get_last_time_range(self) -> tuple:
        """
        Return the default sync window (one hour before now, now), both
        formatted as '%Y-%m-%d %H:%M:%S' strings.
        """
        now = datetime.now()
        one_hour_ago = now - timedelta(hours=1)
        return (
            one_hour_ago.strftime('%Y-%m-%d %H:%M:%S'),
            now.strftime('%Y-%m-%d %H:%M:%S')
        )

    async def fetch_main_data(self, start_time: Optional[str] = None,
                             end_time: Optional[str] = None) -> List[Dict[str, Any]]:
        """
        Fetch main work-order ids completed inside [start_time, end_time).

        Falls back to the last-hour window when either bound is missing.

        Returns:
            List of {'id': ..., 'completetime': ...} dicts, one per work order
            (latest accepted step per TargetEntityId).
        """
        if not start_time or not end_time:
            start_time, end_time = self.get_last_time_range()

        async with self.source_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                # FIX: the time bounds are bound as query parameters instead
                # of being f-string-interpolated into the SQL text, which was
                # an injection/quoting hazard.
                sql_queries = [
                    """
                    SELECT S.id,S.completetime
                    FROM(SELECT a.TargetEntityId AS id,b.DoneAt AS completetime,
                    ROW_NUMBER() OVER(PARTITION BY a.TargetEntityId ORDER BY b.DoneAt DESC) AS num
                    FROM workflowruntimeitems a
                    JOIN workflowruntimesteps b
                      ON b.RuntimeItemId=a.Id
                      AND b.Deleted=0
                    WHERE a.Deleted=0
                      AND b.Status='ACCEPTED'
                      AND b.Name IN ('提交处理结果','提交备案','重新提交处理结果')
                      AND b.DoneAt>=DATE_ADD(NOW(), INTERVAL -1 DAY)
                      AND b.DoneAt>=%s
                      AND b.DoneAt<%s) AS S
                    WHERE S.num=1;
                    """
                ]

                all_results = []
                for i, sql in enumerate(sql_queries, 1):
                    await cursor.execute(sql, (start_time, end_time))
                    rows = await cursor.fetchall()
                    type_results = [{'id': row[0], 'completetime': row[1]} for row in rows]
                    all_results.extend(type_results)
                    logger.info(f"[ASYNC_FETCH] 查询 {i} 完成，获取 {len(type_results)} 条记录")

                return all_results

    async def fetch_mainpart_data(self, id: str) -> List[Dict[str, Any]]:
        """
        Fetch the business main part (MainPartId/MainPartName) for one app id
        from the mall database.

        Returns at most one record: when several distinct main parts match,
        an alert is sent and one record is chosen at random.  Returns [] when
        id is empty or no mall pool is configured.
        """
        if not id or not self.mall_pool:
            return []

        async with self.mall_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                mainpart_sql = """
                SELECT DISTINCT c.MainPartId, c.MainPartName
                FROM tb_orderinfo a
                JOIN tb_orderitem b ON b.OrderId=a.Id
                  AND b.GoodsType IN (5,10,11,18,37,38)
                  AND b.Deleted=0
                JOIN tb_orderitemdetail c ON c.ItemId=b.Id
                  AND c.Deleted=0
                WHERE a.Deleted=0
                  AND a.AppId=%s
                """

                await cursor.execute(mainpart_sql, (id,))
                rows = await cursor.fetchall()

                if not rows:
                    return []

                columns = [desc[0] for desc in cursor.description]
                result_list = [dict(zip(columns, row)) for row in rows]

                if len(result_list) > 1:
                    # Multiple main parts is a data-quality issue — alert,
                    # then keep one at random so downstream stays single-row.
                    send_error_alert(
                        f"单据对应业务主体：({len(result_list)}条)，工单Id: {id}，已随机选取一个主体",
                        "业务主体多条预警"
                    )
                    return [random.choice(result_list)]

                return result_list

    async def insert_target_batch(self, data: List[Dict[str, Any]]) -> None:
        """
        Batch-insert assembled records into workcount_log in one transaction.

        Raises:
            Exception: re-raised after rollback if the insert fails.
        """
        if not data:
            return

        # Column order of the INSERT; missing keys become NULL.
        keys = [
            'Id', 'CostNo', 'WorkOrderId', 'AppCode', 'OrderId', 'OrderNo', 'OrderType',
            'WorkOrderType', 'WorkStatus', 'ProName', 'CityName', 'AreaName', 'InstallAddress',
            'CustSettleId', 'CustSettleName', 'CustomerId', 'CustomerName', 'CustStoreId',
            'CustStoreName', 'MainPartId', 'MainPartName', 'ActualCustStoreName',
            'GeneralGoodsNames', 'ArtificialServicePriceName', 'ArtificialServicePrice',
            'ServiceSubjectName', 'SubjectClassCode', 'ServiceSubjectCode', 'InternalPrice',
            'CostRemark', 'CostReason', 'FinishTime', 'CostConfirmTime', 'Privoder',
            'IsCentralize', 'VinNumber', 'GuaVin', 'PlateNumber', 'CompleteTime',
            'CreatePersonName', 'ServiceCode', 'ServiceName', 'ServiceAscription',
            'ActualRecordPersonCode', 'ActualRecordPersonName', 'ActualRecordPersonAscription',
            'SendRemark', 'ServiceRemark', 'TagSign', 'ChangeRemark'
        ]

        placeholders = ','.join(['%s'] * len(keys))
        columns = ','.join(f'`{k}`' for k in keys)
        sql = f"INSERT INTO workcount_log ({columns}) VALUES ({placeholders})"

        values = []
        for item in data:
            row = []
            for k in keys:
                v = item.get(k)
                if isinstance(v, Decimal):
                    # Decimal is not a native driver type here — stringify.
                    v = str(v)
                row.append(v)
            values.append(tuple(row))

        async with self.target_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    await cursor.executemany(sql, values)
                    await conn.commit()
                    logger.info(f"[ASYNC_INSERT] 成功插入 {len(values)} 条记录")
                except Exception as e:
                    await conn.rollback()
                    logger.error(f"[ASYNC_INSERT] 插入失败，已回滚: {e}")
                    raise

    async def sync_task_async(self, start_time: Optional[str] = None,
                             end_time: Optional[str] = None,
                             batch_size: int = 1000,
                             max_concurrent_batches: int = 3) -> Dict[str, Any]:
        """
        Main async sync entry point: fetch, assemble and insert in batches.

        Args:
            start_time: window start ('%Y-%m-%d %H:%M:%S'); defaults to 1h ago
            end_time: window end; defaults to now
            batch_size: number of main records per batch
            max_concurrent_batches: batches processed concurrently

        Returns:
            dict: success_count / error_count / error_details / duration
        """
        # FIX: the wall-clock timer used to be assigned to `start_time`,
        # clobbering the caller-supplied window bound before it was passed
        # to fetch_main_data.  Use a separate local for timing.
        task_started = time.time()
        total_success = 0
        total_error = 0
        error_details = []
        # FIX: defined before the try so the except path can't raise
        # NameError when fetch_main_data itself fails.
        all_main_data: List[Dict[str, Any]] = []

        logger.info(f"[ASYNC_SYNC] 开始异步数据同步任务")
        logger.info(f"[ASYNC_SYNC] 参数: start_time={start_time}, end_time={end_time}, batch_size={batch_size}")

        try:
            # Fetch all main data for the window.
            all_main_data = await self.fetch_main_data(start_time, end_time)
            logger.info(f"[ASYNC_SYNC] 获取主数据总数: {len(all_main_data)}")

            # Split into batches of at most batch_size records.
            batches = [all_main_data[i:i + batch_size]
                       for i in range(0, len(all_main_data), batch_size)]

            # Bound the number of batches in flight at once.
            semaphore = asyncio.Semaphore(max_concurrent_batches)

            async def process_batch(batch_data: List[Dict[str, Any]], batch_num: int):
                async with semaphore:
                    try:
                        logger.info(f"[ASYNC_SYNC] 开始处理批次 {batch_num}, 记录数: {len(batch_data)}")

                        # Assemble the full record set for this batch...
                        assembled_data = await self.assemble_complete_data(batch_data)

                        # ...and insert it transactionally.
                        await self.insert_target_batch(assembled_data)

                        logger.info(f"[ASYNC_SYNC] 批次 {batch_num} 处理完成")
                        return len(batch_data), 0, []

                    except Exception as e:
                        error_msg = f"批次 {batch_num} 处理失败: {str(e)}"
                        logger.error(error_msg)
                        return 0, len(batch_data), [error_msg]

            tasks = [process_batch(batch, i + 1) for i, batch in enumerate(batches)]
            results = await asyncio.gather(*tasks, return_exceptions=True)

            # FIX: pair each result with its batch so a failed batch is
            # counted at its real size (the final batch may be < batch_size).
            for batch, result in zip(batches, results):
                if isinstance(result, Exception):
                    error_details.append(f"任务执行异常: {str(result)}")
                    total_error += len(batch)
                else:
                    success, error, errors = result
                    total_success += success
                    total_error += error
                    error_details.extend(errors)

            duration = time.time() - task_started
            logger.info(f"[ASYNC_SYNC] 同步完成, 成功={total_success}, 失败={total_error}, 耗时={duration:.2f}s")

            return {
                'success_count': total_success,
                'error_count': total_error,
                'error_details': error_details,
                'duration': duration
            }

        except Exception as e:
            logger.exception(f"[ASYNC_SYNC] 同步任务执行失败: {e}")
            return {
                'success_count': total_success,
                'error_count': total_error + len(all_main_data) - total_success,
                'error_details': error_details + [f"任务执行异常: {str(e)}"],
                'duration': time.time() - task_started
            }

    async def transform_fields_by_ids(self, ids: List[str]) -> Dict[str, Dict[str, Any]]:
        """
        Query derived/transformed fields for the given ids.

        Args:
            ids: ids to transform

        Returns:
            dict: {id: {transformed fields}} mapping
        """
        if not ids:
            return {}

        async with self.source_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                placeholders = ','.join(['%s'] * len(ids))

                # FIX: literal '%' characters are doubled ('%%') because the
                # driver treats '%s' as a parameter marker when parameters
                # are supplied; un-escaped 'SRV%' / '%Y-%m-%d' would make
                # cursor.execute fail to format this query.
                sql = f"""
                SELECT 
                    Id,
                    CASE 
                        WHEN ServiceCode LIKE 'SRV%%' THEN CONCAT('服务-', ServiceName)
                        ELSE ServiceName 
                    END AS TransformedServiceName,
                    CASE 
                        WHEN InternalPrice > 1000 THEN '高价值'
                        WHEN InternalPrice > 500 THEN '中价值'
                        ELSE '低价值'
                    END AS PriceLevel,
                    DATE_FORMAT(CompleteTime, '%%Y-%%m-%%d') AS CompleteDateOnly,
                    UPPER(WorkStatus) AS UpperWorkStatus
                FROM vi_workcount_log 
                WHERE Id IN ({placeholders})
                """

                await cursor.execute(sql, ids)
                rows = await cursor.fetchall()

                result = {}
                for row in rows:
                    result[str(row[0])] = {
                        'TransformedServiceName': row[1],
                        'PriceLevel': row[2],
                        'CompleteDateOnly': row[3],
                        'UpperWorkStatus': row[4]
                    }

                logger.info(f"[TRANSFORM_FIELDS] 转换了 {len(result)} 个ID的字段")
                return result

    async def assemble_complete_data(self, main_data_batch: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Assemble complete records from main data, detail data, transformed
        fields and main-part data.

        Args:
            main_data_batch: one batch of rows from fetch_main_data

        Returns:
            List[Dict]: fully assembled records
        """
        if not main_data_batch:
            return []

        logger.info(f"[ASSEMBLE_DATA] 开始组装 {len(main_data_batch)} 条数据")

        # 1. Collect the ids once for all follow-up queries.
        main_ids = [str(item['id']) for item in main_data_batch]

        # 2. Fetch the three auxiliary data sets concurrently.
        tasks = [
            self.fetch_detail_data_batch(main_data_batch),
            self.transform_fields_by_ids(main_ids),
            self.fetch_mainpart_data_batch(main_ids),
        ]

        detail_data, transformed_fields, mainpart_data = await asyncio.gather(*tasks)

        # 3. Join everything with Polars; fall back to dict merging on error.
        try:
            main_df = pl.DataFrame(main_data_batch)

            if detail_data:
                detail_df = pl.DataFrame(detail_data)
                # Normalise the join key type before joining.
                main_df = main_df.with_columns(pl.col("id").cast(pl.Utf8))
                if "Id" in detail_df.columns:
                    detail_df = detail_df.with_columns(pl.col("Id").cast(pl.Utf8))
                    main_df = main_df.join(detail_df, left_on="id", right_on="Id", how="left")

            if transformed_fields:
                # NOTE(review): if detail_data is empty, "id" is never cast to
                # Utf8 here, so this join may raise on dtype mismatch and fall
                # through to the fallback path — confirm intended.
                transform_data = []
                for id_val, fields in transformed_fields.items():
                    row = {"id": id_val}
                    row.update(fields)
                    transform_data.append(row)

                if transform_data:
                    transform_df = pl.DataFrame(transform_data)
                    main_df = main_df.join(transform_df, on="id", how="left")

            if mainpart_data:
                mainpart_df = pl.DataFrame(mainpart_data)
                if "work_order_id" in mainpart_df.columns:
                    # Main-part rows are keyed by work_order_id.
                    main_df = main_df.join(mainpart_df, left_on="id", right_on="work_order_id", how="left")

            assembled_data = main_df.to_dicts()

            logger.info(f"[ASSEMBLE_DATA] 成功组装 {len(assembled_data)} 条完整数据")
            return assembled_data

        except Exception as e:
            logger.error(f"[ASSEMBLE_DATA] Polars数据组装失败: {e}")
            # Degrade gracefully to plain-dict assembly.
            return await self._fallback_assemble_data(main_data_batch, detail_data, transformed_fields, mainpart_data)

    async def _fallback_assemble_data(self, main_data: List[Dict], detail_data: List[Dict],
                                    transformed_fields: Dict, mainpart_data: List[Dict]) -> List[Dict]:
        """
        Fallback assembly using plain dict merging (no Polars).
        """
        logger.info("[ASSEMBLE_DATA] 使用降级方式组装数据")

        # Index the auxiliary data sets by id for O(1) lookups.
        detail_map = {str(item.get('Id', item.get('id', ''))): item for item in detail_data} if detail_data else {}
        mainpart_map = {str(item.get('work_order_id', item.get('id', ''))): item for item in mainpart_data} if mainpart_data else {}

        assembled_data = []
        for main_item in main_data:
            main_id = str(main_item['id'])

            assembled_item = main_item.copy()

            # Merge detail columns.
            if main_id in detail_map:
                assembled_item.update(detail_map[main_id])

            # Merge transformed fields.
            if main_id in transformed_fields:
                assembled_item.update(transformed_fields[main_id])

            # Merge main-part fields, prefixed to avoid key collisions.
            if main_id in mainpart_map:
                mainpart_item = mainpart_map[main_id]
                for key, value in mainpart_item.items():
                    assembled_item[f"mainpart_{key}"] = value

            assembled_data.append(assembled_item)

        return assembled_data

    async def fetch_mainpart_data_batch(self, ids: List[str]) -> List[Dict[str, Any]]:
        """
        Batch-fetch main-part data for a list of work-order ids.

        Args:
            ids: work-order ids

        Returns:
            List[Dict]: main-part rows; duplicate work-order ids trigger an
            alert but are all returned.
        """
        if not ids:
            return []

        async with self.source_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                placeholders = ','.join(['%s'] * len(ids))

                sql = f"""
                SELECT 
                    WorkOrderId as work_order_id,
                    MainPartId as mainpart_id,
                    MainPartName as mainpart_name,
                    MainPartType as mainpart_type,
                    MainPartStatus as mainpart_status
                FROM tb_workorder_mainpart 
                WHERE WorkOrderId IN ({placeholders})
                AND Deleted = 0
                """

                await cursor.execute(sql, ids)
                rows = await cursor.fetchall()

                if not rows:
                    return []

                columns = [desc[0] for desc in cursor.description]
                result_list = [dict(zip(columns, row)) for row in rows]

                # Count rows per work-order id to detect duplicates.
                id_counts = {}
                for item in result_list:
                    work_order_id = str(item['work_order_id'])
                    id_counts[work_order_id] = id_counts.get(work_order_id, 0) + 1

                # Alert for every work-order id with more than one row.
                for work_order_id, count in id_counts.items():
                    if count > 1:
                        send_error_alert(
                            f"工单主体数据重复：工单ID {work_order_id} 有 {count} 条记录",
                            "主体数据重复预警"
                        )

                logger.info(f"[FETCH_MAINPART_BATCH] 获取主体数据 {len(result_list)} 条")
                return result_list

    async def fetch_detail_data_batch(self, main_data_batch: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Batch-fetch detail rows from vi_workcount_log for a main-data batch.
        """
        if not main_data_batch:
            return []

        async with self.source_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                ids = [str(item['id']) for item in main_data_batch]
                placeholders = ','.join(['%s'] * len(ids))

                sql = f"""
                SELECT 
                    Id, WorkOrderId, CostNo, AppCode, OrderId, OrderNo, OrderType,
                    WorkOrderType, WorkStatus, ProName, CityName, AreaName, InstallAddress,
                    CustSettleId, CustSettleName, CustomerId, CustomerName, CustStoreId,
                    CustStoreName, ActualCustStoreName, GeneralGoodsNames, 
                    ArtificialServicePriceName, ArtificialServicePrice, ServiceSubjectName,
                    SubjectClassCode, ServiceSubjectCode, InternalPrice, CostRemark, 
                    CostReason, FinishTime, CostConfirmTime, Privoder, IsCentralize,
                    VinNumber, GuaVin, PlateNumber, CompleteTime, CreatePersonName,
                    ServiceCode, ServiceName, ServiceAscription, ActualRecordPersonCode,
                    ActualRecordPersonName, ActualRecordPersonAscription, SendRemark,
                    ServiceRemark, TagSign, ChangeRemark
                FROM vi_workcount_log
                WHERE Id IN ({placeholders})
                """

                await cursor.execute(sql, ids)
                rows = await cursor.fetchall()

                if not rows:
                    return []

                columns = [desc[0] for desc in cursor.description]
                result_list = [dict(zip(columns, row)) for row in rows]
                logger.info(f"[FETCH_DETAIL_BATCH] 获取详细数据 {len(result_list)} 条")
                return result_list


# 便捷函数，用于在现有调度器中调用
async def async_sync_task(start_time: Optional[str] = None,
                         end_time: Optional[str] = None,
                         batch_size: int = 1000,
                         max_concurrent_batches: int = 3,
                         max_connections: int = 10,
                         **kwargs) -> Dict[str, Any]:
    """
    Convenience entry point: run one async sync cycle with a managed handler.

    Opens the connection pools, runs sync_task_async, and guarantees the
    pools are closed again before returning the result dict.
    """
    handler = BasicOrderHandler(max_connections=max_connections)
    async with handler:
        result = await handler.sync_task_async(
            start_time=start_time,
            end_time=end_time,
            batch_size=batch_size,
            max_concurrent_batches=max_concurrent_batches,
        )
    return result


# 同步包装器，用于在非异步环境中调用
def sync_task_with_async(start_time: Optional[str] = None,
                        end_time: Optional[str] = None,
                        batch_size: int = 1000,
                        max_concurrent_batches: int = 3,
                        max_connections: int = 10,
                        **kwargs) -> Dict[str, Any]:
    """
    Blocking wrapper that drives async_sync_task from synchronous code.

    Builds the coroutine and hands it to asyncio.run, returning the same
    result dict that sync_task_async produces.
    """
    coro = async_sync_task(
        start_time=start_time,
        end_time=end_time,
        batch_size=batch_size,
        max_concurrent_batches=max_concurrent_batches,
        max_connections=max_connections,
        **kwargs,
    )
    return asyncio.run(coro)