#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
RedFire MySQL → PostgreSQL 实时数据同步机制
===========================================

功能特性:
- 实时增量数据同步
- CDC (Change Data Capture) 支持
- 数据类型自动转换
- 故障恢复和断点续传
- 性能优化和监控
- VnPy 数据格式兼容

Author: RedFire Team
Date: 2025-09-16
Version: v1.0.0
"""

import asyncio
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple, Any
import json
import hashlib
from dataclasses import dataclass, asdict
from contextlib import asynccontextmanager
import aiomysql
import asyncpg
import redis.asyncio as redis
from prometheus_client import Counter, Histogram, Gauge
import traceback

# Logging configuration (module-wide basicConfig, INFO level)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Prometheus metrics: synced-record counts, per-table sync duration,
# sync lag, and error counts (labeled by table and status/error type)
SYNC_RECORDS_COUNTER = Counter('mysql_pg_sync_records_total', 'Total synced records', ['table', 'status'])
SYNC_DURATION_HISTOGRAM = Histogram('mysql_pg_sync_duration_seconds', 'Sync duration', ['table'])
SYNC_LAG_GAUGE = Gauge('mysql_pg_sync_lag_seconds', 'Sync lag in seconds', ['table'])
SYNC_ERRORS_COUNTER = Counter('mysql_pg_sync_errors_total', 'Sync errors', ['table', 'error_type'])


@dataclass
class SyncConfig:
    """Configuration for the MySQL -> PostgreSQL sync pipeline.

    Groups the three connection configs (MySQL source, PostgreSQL target,
    Redis for watermark/binlog bookkeeping) together with sync tuning
    knobs and the source-table -> target-table mapping.
    """
    mysql_config: Dict[str, Any]
    postgresql_config: Dict[str, Any]
    redis_config: Dict[str, Any]

    # Sync tuning
    batch_size: int = 1000   # records per upsert batch
    sync_interval: int = 5   # seconds between continuous-sync cycles
    max_retries: int = 3

    # Source table -> target table mapping. None is the sentinel meaning
    # "use the VnPy defaults filled in by __post_init__" — hence the
    # Optional annotation (the previous plain Dict annotation was wrong).
    table_mappings: Optional[Dict[str, str]] = None

    # Data retention: first-time sync reaches this many days back
    mysql_retention_days: int = 3

    def __post_init__(self):
        # Populate the default VnPy history-table mapping when the caller
        # did not supply one.
        if self.table_mappings is None:
            self.table_mappings = {
                'dbtickdata': 'vnpy_tick_history',
                'dbbardata': 'vnpy_bar_history',
                'dborderdata': 'vnpy_order_history',
                'dbtradedata': 'vnpy_trade_history',
                'dbpositiondata': 'vnpy_position_history',
                'dbaccountdata': 'vnpy_account_history'
            }


@dataclass
class SyncRecord:
    """One captured row change destined for the PostgreSQL target.

    The checksum is an MD5 hex digest over the JSON-serialized row data
    with keys sorted, so identical content yields the same checksum
    regardless of key order. MD5 is used for change detection only, not
    for anything security-sensitive.
    """
    table_name: str
    operation: str  # one of INSERT, UPDATE, DELETE
    primary_key: str
    data: Dict[str, Any]
    sync_timestamp: datetime
    # Computed in __post_init__ when not supplied. None is the sentinel
    # default, hence Optional (the previous plain str annotation was wrong).
    checksum: Optional[str] = None

    def __post_init__(self):
        if self.checksum is None:
            self.checksum = self._calculate_checksum()

    def _calculate_checksum(self) -> str:
        """Return a deterministic MD5 hex digest of the row data."""
        # default=str makes non-JSON values (datetime, Decimal, ...) serializable
        data_str = json.dumps(self.data, sort_keys=True, default=str)
        return hashlib.md5(data_str.encode()).hexdigest()


class DataTypeConverter:
    """Maps MySQL column types and cell values onto PostgreSQL equivalents."""

    # Ordered (mysql_prefix, pg_type) pairs. Order matters because matching
    # is prefix-based: more specific names precede their own prefixes
    # (DATETIME before DATE, TIMESTAMP before TIME) so the first hit wins.
    _TYPE_TABLE = (
        ('TINYINT', 'SMALLINT'),
        ('SMALLINT', 'SMALLINT'),
        ('MEDIUMINT', 'INTEGER'),
        ('INT', 'INTEGER'),
        ('INTEGER', 'INTEGER'),
        ('BIGINT', 'BIGINT'),
        ('FLOAT', 'REAL'),
        ('DOUBLE', 'NUMERIC(28,8)'),
        ('DECIMAL', 'NUMERIC'),
        ('VARCHAR', 'VARCHAR'),
        ('CHAR', 'CHAR'),
        ('TEXT', 'TEXT'),
        ('LONGTEXT', 'TEXT'),
        ('DATETIME', 'TIMESTAMP'),
        ('TIMESTAMP', 'TIMESTAMP WITH TIME ZONE'),
        ('DATE', 'DATE'),
        ('TIME', 'TIME'),
        ('YEAR', 'SMALLINT'),
        ('TINYBLOB', 'BYTEA'),
        ('BLOB', 'BYTEA'),
        ('MEDIUMBLOB', 'BYTEA'),
        ('LONGBLOB', 'BYTEA'),
        ('JSON', 'JSONB'),
        ('BOOLEAN', 'BOOLEAN'),
        ('BIT', 'BIT'),
    )

    @staticmethod
    def mysql_to_postgresql_type(mysql_type: str) -> str:
        """Return the PostgreSQL type name for a MySQL column type string.

        Matching is case-insensitive and prefix-based, so parameterized
        declarations such as ``int(11)`` resolve correctly. Unrecognized
        types fall back to TEXT.
        """
        declared = mysql_type.upper()
        return next(
            (pg for prefix, pg in DataTypeConverter._TYPE_TABLE
             if declared.startswith(prefix)),
            'TEXT'
        )

    @staticmethod
    def convert_value(value: Any, mysql_type: str) -> Any:
        """Coerce a single MySQL cell value for the PostgreSQL target.

        NULL passes through unchanged; numeric types coerce with 0/0.0
        fallbacks on empty or unparseable input; DATETIME/TIMESTAMP strings
        are parsed as ISO-8601 (current time substituted when unparseable);
        raw bytes are decoded as UTF-8; everything else is stringified.
        """
        if value is None:
            return None

        declared = mysql_type.upper()

        # Floating-point / decimal columns
        if declared.startswith(('DOUBLE', 'FLOAT', 'DECIMAL')):
            try:
                return 0.0 if value == '' else float(value)
            except (ValueError, TypeError):
                return 0.0

        # Integer columns
        if declared.startswith(('TINYINT', 'SMALLINT', 'MEDIUMINT', 'INT', 'BIGINT')):
            try:
                return 0 if value == '' else int(value)
            except (ValueError, TypeError):
                return 0

        # Temporal columns: parse ISO strings, pass non-strings through
        if declared.startswith(('DATETIME', 'TIMESTAMP')):
            if not isinstance(value, str):
                return value
            try:
                return datetime.fromisoformat(value.replace('Z', '+00:00'))
            except ValueError:
                return datetime.now()

        # Raw bytes become UTF-8 text (undecodable bytes are dropped)
        if isinstance(value, (bytes, bytearray)):
            return value.decode('utf-8', errors='ignore')

        return str(value)


class MySQLChangeDataCapture:
    """MySQL change-capture source (polling stand-in for true binlog CDC).

    Records the server's binlog position at startup (persisted to Redis so
    a real binlog reader could resume from it) and serves incremental rows
    by polling each table's ``datetime`` column.
    """

    def __init__(self, mysql_config: Dict[str, Any], redis_client):
        self.mysql_config = mysql_config
        self.redis_client = redis_client
        self.connection_pool = None   # aiomysql pool, created in initialize()
        self.binlog_position = None   # {'file': ..., 'position': ...} or None

    async def initialize(self):
        """Create the MySQL pool and persist the current binlog position."""
        self.connection_pool = await aiomysql.create_pool(**self.mysql_config)

        # NOTE(review): SHOW MASTER STATUS is removed in MySQL 8.4 in favor
        # of SHOW BINARY LOG STATUS — confirm the target server version.
        async with self.connection_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                await cursor.execute("SHOW MASTER STATUS")
                result = await cursor.fetchone()
                if result:
                    self.binlog_position = {
                        'file': result[0],
                        'position': result[1]
                    }
                    await self.redis_client.hset(
                        'mysql_binlog_position', 
                        mapping=self.binlog_position
                    )

        logger.info(f"MySQL CDC initialized at position: {self.binlog_position}")

    async def get_incremental_changes(self, table_name: str, last_sync_time: datetime) -> List[SyncRecord]:
        """Return rows of `table_name` changed at or after `last_sync_time`.

        This is a polling stand-in for real CDC (e.g. the mysql-replication
        library): INSERT and UPDATE are not distinguished, and results are
        capped at 5000 rows per call, oldest first.

        Raises:
            ValueError: if `table_name` is not a plain Python identifier.
                The name is interpolated into the SQL text (identifiers
                cannot be bound parameters), so this guards against SQL
                injection through a compromised table mapping.
        """
        if not table_name.isidentifier():
            raise ValueError(f"Invalid table name: {table_name!r}")

        records = []

        async with self.connection_pool.acquire() as conn:
            async with conn.cursor(aiomysql.DictCursor) as cursor:
                # '>=' re-reads the boundary row on the next cycle; the
                # target side upserts, which keeps that idempotent.
                sql = f"""
                SELECT * FROM {table_name} 
                WHERE datetime >= %s 
                ORDER BY datetime ASC 
                LIMIT 5000
                """

                await cursor.execute(sql, (last_sync_time,))
                rows = await cursor.fetchall()

                for row in rows:
                    # Primary key: explicit id column when present, else a
                    # symbol+datetime composite.
                    if 'id' in row:
                        primary_key = str(row['id'])
                    else:
                        primary_key = f"{row.get('symbol', '')}_{row.get('datetime', '')}"

                    records.append(SyncRecord(
                        table_name=table_name,
                        operation='INSERT',  # simplified: INSERT/UPDATE not distinguished
                        primary_key=primary_key,
                        data=dict(row),
                        sync_timestamp=datetime.now()
                    ))

        logger.info(f"Found {len(records)} incremental changes for table {table_name}")
        return records

    async def cleanup(self):
        """Close the MySQL connection pool and wait for it to drain."""
        if self.connection_pool:
            self.connection_pool.close()
            await self.connection_pool.wait_closed()


class PostgreSQLSyncTarget:
    """PostgreSQL write side: auto-creates target tables and upserts batches."""

    def __init__(self, postgresql_config: Dict[str, Any]):
        self.postgresql_config = postgresql_config
        self.connection_pool = None   # asyncpg pool, created in initialize()
        self.converter = DataTypeConverter()

    async def initialize(self):
        """Open the asyncpg connection pool."""
        self.connection_pool = await asyncpg.create_pool(**self.postgresql_config)
        logger.info("PostgreSQL connection pool initialized")

    async def ensure_table_exists(self, source_table: str, target_table: str, sample_data: Dict[str, Any]):
        """Create `target_table` (schema inferred from `sample_data`) if missing."""
        async with self.connection_pool.acquire() as conn:
            table_exists = await conn.fetchval("""
                SELECT EXISTS (
                    SELECT FROM information_schema.tables 
                    WHERE table_schema = 'public' 
                    AND table_name = $1
                )
            """, target_table)

            if not table_exists:
                logger.info(f"Creating target table: {target_table}")
                await self._create_table_from_sample(conn, target_table, sample_data)

    async def _create_table_from_sample(self, conn, target_table: str, sample_data: Dict[str, Any]):
        """Build and execute CREATE TABLE, inferring column types from sample values.

        An ``id`` column becomes BIGSERIAL PRIMARY KEY; other columns are
        typed from their sample value (numeric -> NUMERIC(28,8), datetime ->
        TIMESTAMP(3), str -> VARCHAR, anything else -> TEXT).

        Raises:
            ValueError: if the table name is not a plain identifier (it is
                interpolated into the DDL text and cannot be parameterized).

        NOTE(review): no unique index is created here, so the ON CONFLICT
        clause used by upsert_records will fail against auto-created tables
        unless a unique constraint is added — confirm the intended schema.
        """
        if not target_table.isidentifier():
            raise ValueError(f"Invalid target table name: {target_table!r}")

        columns = []
        for column_name, value in sample_data.items():
            if column_name == 'id':
                columns.append(f"{column_name} BIGSERIAL PRIMARY KEY")
                continue

            # Infer the column type from the sample value
            if isinstance(value, (int, float)):
                pg_type = "NUMERIC(28,8)"
            elif isinstance(value, datetime):
                pg_type = "TIMESTAMP(3)"
            elif isinstance(value, str):
                pg_type = f"VARCHAR({max(255, len(value) * 2)})"
            else:
                pg_type = "TEXT"

            columns.append(f"{column_name} {pg_type}")

        create_sql = f"""
            CREATE TABLE {target_table} (
                {', '.join(columns)}
            )
        """

        await conn.execute(create_sql)
        logger.info(f"Created table {target_table} with {len(columns)} columns")

    async def upsert_records(self, target_table: str, records: List[SyncRecord]) -> Dict[str, int]:
        """Upsert a batch of records; returns {'inserted','updated','errors'} counts.

        Each record becomes INSERT ... ON CONFLICT DO UPDATE keyed on the
        ``datetime`` column (first column as fallback). Per-record failures
        are counted and skipped; 'updated' stays 0 because inserts and
        updates are not distinguished.

        Raises:
            ValueError: if the table name is not a plain identifier.
        """
        if not records:
            return {'inserted': 0, 'updated': 0, 'errors': 0}

        if not target_table.isidentifier():
            raise ValueError(f"Invalid target table name: {target_table!r}")

        stats = {'inserted': 0, 'updated': 0, 'errors': 0}

        async with self.connection_pool.acquire() as conn:
            async with conn.transaction():
                for record in records:
                    try:
                        # Simplified conversion: every value goes through the
                        # VARCHAR path (i.e. is stringified).
                        converted_data = {
                            key: self.converter.convert_value(value, 'VARCHAR')
                            for key, value in record.data.items()
                        }

                        columns = list(converted_data.keys())
                        values = list(converted_data.values())
                        placeholders = [f'${i+1}' for i in range(len(values))]

                        # Conflict target: datetime column when present
                        conflict_column = 'datetime' if 'datetime' in columns else columns[0]

                        upsert_sql = f"""
                            INSERT INTO {target_table} ({', '.join(columns)})
                            VALUES ({', '.join(placeholders)})
                            ON CONFLICT ({conflict_column}) 
                            DO UPDATE SET {', '.join([f"{col} = EXCLUDED.{col}" for col in columns])}
                        """

                        # A nested transaction is a SAVEPOINT in asyncpg, so a
                        # failing record rolls back alone. Without it the first
                        # error aborts the outer transaction and every later
                        # execute fails with InFailedSQLStateError, poisoning
                        # the whole batch.
                        async with conn.transaction():
                            await conn.execute(upsert_sql, *values)
                        stats['inserted'] += 1

                    except Exception as e:
                        logger.error(f"Error upserting record {record.primary_key}: {e}")
                        stats['errors'] += 1
                        SYNC_ERRORS_COUNTER.labels(table=target_table, error_type='upsert_error').inc()

        return stats

    async def cleanup(self):
        """Close the PostgreSQL connection pool."""
        if self.connection_pool:
            await self.connection_pool.close()


class MySQLPostgreSQLSyncManager:
    """Orchestrates MySQL -> PostgreSQL sync across all configured tables.

    Keeps a per-table sync watermark in Redis, pulls incremental changes
    via MySQLChangeDataCapture, and writes them with PostgreSQLSyncTarget,
    exporting Prometheus metrics along the way.
    """

    def __init__(self, config: SyncConfig):
        self.config = config
        self.mysql_cdc = None            # set in initialize()
        self.postgresql_target = None    # set in initialize()
        self.redis_client = None         # set in initialize()
        self.sync_status = {}
        self.running = False             # continuous-sync loop flag

    async def initialize(self):
        """Connect Redis, the MySQL CDC source, and the PostgreSQL target."""
        self.redis_client = redis.Redis(**self.config.redis_config)

        self.mysql_cdc = MySQLChangeDataCapture(
            self.config.mysql_config,
            self.redis_client
        )
        await self.mysql_cdc.initialize()

        self.postgresql_target = PostgreSQLSyncTarget(self.config.postgresql_config)
        await self.postgresql_target.initialize()

        logger.info("Sync manager initialized successfully")

    async def get_last_sync_time(self, table_name: str) -> datetime:
        """Return the table's sync watermark from Redis.

        Falls back to `mysql_retention_days` ago on first sync.
        """
        sync_time_str = await self.redis_client.get(f"sync_last_time:{table_name}")
        if sync_time_str:
            # Redis returns bytes with decode_responses=False, str otherwise;
            # tolerate both instead of assuming bytes.
            if isinstance(sync_time_str, bytes):
                sync_time_str = sync_time_str.decode()
            return datetime.fromisoformat(sync_time_str)
        # First sync: start at the edge of the MySQL retention window
        return datetime.now() - timedelta(days=self.config.mysql_retention_days)

    async def update_last_sync_time(self, table_name: str, sync_time: datetime):
        """Persist the table's sync watermark to Redis as an ISO-8601 string."""
        await self.redis_client.set(
            f"sync_last_time:{table_name}", 
            sync_time.isoformat()
        )

    async def sync_table(self, source_table: str, target_table: str) -> Dict[str, Any]:
        """Sync one table's incremental changes; return a status summary dict."""
        start_time = datetime.now()

        try:
            with SYNC_DURATION_HISTOGRAM.labels(table=source_table).time():
                last_sync_time = await self.get_last_sync_time(source_table)

                changes = await self.mysql_cdc.get_incremental_changes(
                    source_table, 
                    last_sync_time
                )

                if not changes:
                    logger.debug(f"No changes found for table {source_table}")
                    return {'status': 'success', 'records': 0}

                # Create the target table on first contact, inferring its
                # schema from the first changed row.
                await self.postgresql_target.ensure_table_exists(
                    source_table, 
                    target_table, 
                    changes[0].data
                )

                # Write in batches, accumulating stats and metrics
                batch_size = self.config.batch_size
                total_stats = {'inserted': 0, 'updated': 0, 'errors': 0}

                for i in range(0, len(changes), batch_size):
                    batch = changes[i:i + batch_size]
                    stats = await self.postgresql_target.upsert_records(target_table, batch)

                    for key in total_stats:
                        total_stats[key] += stats[key]

                    SYNC_RECORDS_COUNTER.labels(
                        table=source_table, 
                        status='success'
                    ).inc(stats['inserted'] + stats['updated'])

                    if stats['errors'] > 0:
                        SYNC_RECORDS_COUNTER.labels(
                            table=source_table, 
                            status='error'
                        ).inc(stats['errors'])

                # Advance the watermark. The CDC query is capped (LIMIT 5000),
                # so advancing to wall-clock "now" would permanently skip any
                # rows beyond the cap; use the newest row timestamp actually
                # fetched instead, falling back to now() only when rows carry
                # no datetime value.
                current_time = datetime.now()
                row_times = [
                    c.data['datetime'] for c in changes
                    if isinstance(c.data.get('datetime'), datetime)
                ]
                await self.update_last_sync_time(
                    source_table,
                    max(row_times) if row_times else current_time
                )

                # Cycle duration, exported as the lag proxy
                sync_lag = (current_time - start_time).total_seconds()
                SYNC_LAG_GAUGE.labels(table=source_table).set(sync_lag)

                logger.info(f"Table {source_table} sync completed: {total_stats}")

                return {
                    'status': 'success',
                    'records': len(changes),
                    'stats': total_stats,
                    'duration': sync_lag
                }

        except Exception as e:
            logger.error(f"Error syncing table {source_table}: {e}")
            logger.error(traceback.format_exc())
            SYNC_ERRORS_COUNTER.labels(table=source_table, error_type='sync_error').inc()
            return {'status': 'error', 'error': str(e)}

    async def sync_all_tables(self) -> Dict[str, Any]:
        """Run sync_table for every configured source -> target mapping."""
        results = {}

        for source_table, target_table in self.config.table_mappings.items():
            results[source_table] = await self.sync_table(source_table, target_table)

        return results

    async def start_continuous_sync(self):
        """Loop sync_all_tables every `sync_interval` seconds until stopped."""
        self.running = True
        logger.info("Starting continuous sync process...")

        while self.running:
            try:
                start_time = datetime.now()
                results = await self.sync_all_tables()

                total_records = sum(
                    r.get('records', 0) for r in results.values() 
                    if r.get('status') == 'success'
                )

                sync_duration = (datetime.now() - start_time).total_seconds()

                logger.info(f"Sync cycle completed: {total_records} records in {sync_duration:.2f}s")

                await asyncio.sleep(self.config.sync_interval)

            except Exception as e:
                logger.error(f"Error in sync cycle: {e}")
                # Back off on failure before retrying
                await asyncio.sleep(self.config.sync_interval * 2)

    async def stop_sync(self):
        """Signal the continuous-sync loop to exit after its current cycle."""
        self.running = False
        logger.info("Sync process stopped")

    async def cleanup(self):
        """Release MySQL, PostgreSQL and Redis resources."""
        self.running = False

        if self.mysql_cdc:
            await self.mysql_cdc.cleanup()

        if self.postgresql_target:
            await self.postgresql_target.cleanup()

        if self.redis_client:
            # NOTE(review): redis-py 5.x prefers aclose(); close() still works
            await self.redis_client.close()

        logger.info("Sync manager cleanup completed")


# Convenience helpers
async def create_sync_manager(config_dict: Dict[str, Any]) -> MySQLPostgreSQLSyncManager:
    """Build a SyncConfig from a plain dict, construct the sync manager,
    and run its async initialization before handing it back."""
    manager = MySQLPostgreSQLSyncManager(SyncConfig(**config_dict))
    await manager.initialize()
    return manager


@asynccontextmanager
async def sync_manager_context(config_dict: Dict[str, Any]):
    """Async context manager yielding an initialized sync manager and
    guaranteeing resource cleanup on exit, even on error."""
    mgr = await create_sync_manager(config_dict)
    try:
        yield mgr
    finally:
        await mgr.cleanup()


# Example configuration (replace the placeholder passwords before use)
EXAMPLE_CONFIG = {
    # Passed straight through to aiomysql.create_pool(**...)
    'mysql_config': {
        'host': 'localhost',
        'port': 3306,
        'user': 'redfire_user',
        'password': 'your_password',
        'db': 'redfire_business',
        'charset': 'utf8mb4',
        'autocommit': True
    },
    # Passed straight through to asyncpg.create_pool(**...)
    'postgresql_config': {
        'host': 'localhost',
        'port': 5432,
        'user': 'redfire_analytics',
        'password': 'your_password',
        'database': 'redfire_analytics',
        'server_settings': {
            'application_name': 'redfire_sync',
        }
    },
    # Redis stores the sync watermarks and binlog position; responses are raw bytes
    'redis_config': {
        'host': 'localhost',
        'port': 6379,
        'db': 0,
        'decode_responses': False
    },
    'batch_size': 1000,     # records per upsert batch
    'sync_interval': 5,     # seconds between continuous-sync cycles
    'max_retries': 3
}


if __name__ == "__main__":
    async def main():
        """Example entry point: run a single full sync pass and print results."""
        async with sync_manager_context(EXAMPLE_CONFIG) as manager:
            # One-off sync of every configured table
            results = await manager.sync_all_tables()
            print("Sync results:", results)
            
            # Or start continuous sync (commented out to avoid an endless loop)
            # await manager.start_continuous_sync()
    
    asyncio.run(main())
