"""
MySQL与Elasticsearch同步实现类
处理MySQL binlog到ES的实时同步功能
"""
from elasticsearch import Elasticsearch
import logging
import configparser
import os
from django.conf import settings
import time

# 配置日志
logger = logging.getLogger('elasticsearch')

class MySQLESSync:
    """Mirror MySQL article rows into Elasticsearch by tailing the binlog.

    Connection settings are loaded from ``mysql_config.ini`` under the
    Django project root.  Row events (INSERT/UPDATE/DELETE) on tables whose
    name contains ``article`` are replicated into the ``medical_articles``
    index.  Typical usage::

        sync = MySQLESSync()
        sync.connect_es()
        sync.connect_mysql()
        sync.start_sync()   # blocks until stop_sync() is called
    """

    def __init__(self):
        """Load MySQL/binlog configuration and initialise sync state."""
        self.es_client = None      # set by connect_es()
        self.mysql_conn = None     # kept for API compatibility; unused here
        self.stream = None         # BinLogStreamReader, created in start_sync()
        self.running = False       # loop flag toggled by start_sync()/stop_sync()
        self.es_index = 'medical_articles'
        self.event_map = {}        # row-event class -> handler; filled lazily

        # Read MySQL connection settings from mysql_config.ini.
        config = configparser.ConfigParser()
        config_path = os.path.join(settings.BASE_DIR, 'mysql_config.ini')
        try:
            config.read(config_path, encoding='utf-8')
        except (UnicodeDecodeError, configparser.Error):
            # Retry with the platform default encoding.  The original used a
            # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
            config.read(config_path)

        self.mysql_settings = {
            'host': config.get('mysql', 'host', fallback='localhost'),
            'port': config.getint('mysql', 'port', fallback=3306),
            'user': config.get('mysql', 'user', fallback='root'),
            'password': config.get('mysql', 'password', fallback='root'),
            'database': config.get('mysql', 'database', fallback='hospital'),
            'charset': 'utf8mb4'
        }

        # Replication identity and optional resume position (None = auto-detect).
        self.server_id = config.getint('binlog', 'server_id', fallback=100)
        self.log_file = config.get('binlog', 'log_file', fallback=None)
        self.log_pos = config.getint('binlog', 'log_pos', fallback=None)

    def connect_es(self):
        """Create the Elasticsearch client.

        Returns:
            bool: True if the client object was created, False otherwise.
            NOTE(review): the Elasticsearch constructor does not actually
            contact the cluster, so this mostly validates the URL.
        """
        try:
            self.es_client = Elasticsearch('http://localhost:9200')
            logger.info("成功连接到Elasticsearch")
            return True
        except Exception as e:
            logger.error(f"连接Elasticsearch失败: {str(e)}")
            return False

    def connect_mysql(self):
        """Resolve the binlog start position and register event handlers.

        Returns:
            bool: True on success, False on any failure (details are logged).
        """
        try:
            from pymysqlreplication.row_event import (
                DeleteRowsEvent,
                UpdateRowsEvent,
                WriteRowsEvent,
            )

            # Auto-detect the current master position when not configured.
            if not self.log_file or not self.log_pos:
                if not self._fetch_master_position():
                    return False

            logger.info("创建binlog监听器...")

            # Dispatch table: binlog event class -> handler method.
            self.event_map = {
                WriteRowsEvent: self._handle_write,
                UpdateRowsEvent: self._handle_update,
                DeleteRowsEvent: self._handle_delete
            }

            logger.info("成功连接到MySQL并配置binlog监听")
            return True

        except Exception as e:
            logger.error(f"连接MySQL或配置binlog失败: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            return False

    def _fetch_master_position(self):
        """Query MySQL for the current binlog file/position.

        Returns:
            bool: True and sets self.log_file/self.log_pos on success.
        """
        import pymysql
        conn = pymysql.connect(**self.mysql_settings)
        try:
            with conn.cursor() as cursor:
                cursor.execute("SHOW MASTER STATUS")
                result = cursor.fetchone()
            if not result:
                logger.error("无法获取当前binlog位置")
                return False
            self.log_file = result[0]
            self.log_pos = int(result[1])
            logger.info(f"当前binlog位置: {self.log_file}:{self.log_pos}")
            return True
        finally:
            # The original leaked this connection on the "no result" path;
            # always close it now.
            conn.close()

    def start_sync(self):
        """Block and stream binlog row events into Elasticsearch.

        Requires connect_es() to have been called first.  The handler
        dispatch table is built here if connect_mysql() was skipped (the
        original raised AttributeError in that case).
        """
        if not self.es_client:
            logger.error("ES客户端未初始化，请先调用connect_es()")
            return

        self.running = True
        logger.info("开始MySQL到ES的实时同步...")

        try:
            from pymysqlreplication import BinLogStreamReader
            from pymysqlreplication.row_event import (
                DeleteRowsEvent,
                UpdateRowsEvent,
                WriteRowsEvent,
            )

            # Defensive: make sure the dispatch table exists even when
            # connect_mysql() was never called.
            if not self.event_map:
                self.event_map = {
                    WriteRowsEvent: self._handle_write,
                    UpdateRowsEvent: self._handle_update,
                    DeleteRowsEvent: self._handle_delete
                }

            # blocking=True keeps the stream open waiting for new events;
            # resume_stream continues from log_file/log_pos.
            self.stream = BinLogStreamReader(
                connection_settings=self.mysql_settings,
                server_id=self.server_id,
                log_file=self.log_file,
                log_pos=self.log_pos,
                only_events=[WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent],
                blocking=True,
                resume_stream=True
            )

            for binlogevent in self.stream:
                if not self.running:
                    break

                handler = self.event_map.get(type(binlogevent))
                if handler:
                    try:
                        handler(binlogevent)
                    except Exception as e:
                        # One bad event must not kill the whole stream.
                        logger.error(f"处理binlog事件时出错: {str(e)}")

                # Remember the last processed position so a restart resumes here.
                self.log_file = self.stream.log_file
                self.log_pos = self.stream.log_pos

            logger.info("MySQL到ES的实时同步已停止")

        except Exception as e:
            logger.error(f"同步过程中出错: {str(e)}")
            self.running = False
            logger.info("MySQL到ES的实时同步已停止")

        finally:
            if self.stream:
                self.stream.close()

    def stop_sync(self):
        """Ask the streaming loop to stop and close the binlog stream."""
        self.running = False
        if self.stream:
            self.stream.close()
        logger.info("正在停止MySQL到ES的同步...")

    @staticmethod
    def _is_article_table(table):
        """Return True if *table* is an article table we should sync.

        Matches any table whose name contains "article" (case-insensitive);
        this subsumes the original's explicit checks for "medical_article"
        and "article".
        """
        return "article" in table.lower()

    def _handle_write(self, event):
        """Index rows from an INSERT (WriteRowsEvent) into Elasticsearch."""
        try:
            logger.info(f"收到INSERT事件: 表名={event.table}, 数据库={event.schema}")
            for row in event.rows:
                try:
                    data = row["values"]
                    # Log the row payload to aid debugging.
                    logger.info(f"INSERT数据: {data}")

                    if self._is_article_table(event.table):
                        logger.info(f"处理文章表数据: 表名={event.table}")
                        self._sync_article(data)
                except Exception as e:
                    logger.error(f"处理INSERT事件行时出错: {str(e)}")
        except Exception as e:
            logger.error(f"处理INSERT事件时出错: {str(e)}")

    def _handle_update(self, event):
        """Re-index rows from an UPDATE (UpdateRowsEvent) using post-update values."""
        try:
            logger.info(f"收到UPDATE事件: 表名={event.table}, 数据库={event.schema}")
            for row in event.rows:
                try:
                    data = row["after_values"]
                    # Log the row payload to aid debugging.
                    logger.info(f"UPDATE数据: {data}")

                    if self._is_article_table(event.table):
                        logger.info(f"处理文章表数据: 表名={event.table}")
                        self._sync_article(data)
                except Exception as e:
                    logger.error(f"处理UPDATE事件行时出错: {str(e)}")
        except Exception as e:
            logger.error(f"处理UPDATE事件时出错: {str(e)}")

    def _handle_delete(self, event):
        """Remove documents for rows deleted in MySQL (DeleteRowsEvent)."""
        try:
            logger.info(f"收到DELETE事件: 表名={event.table}, 数据库={event.schema}")
            for row in event.rows:
                try:
                    data = row["values"]
                    # Log the row payload to aid debugging.
                    logger.info(f"DELETE数据: {data}")

                    if self._is_article_table(event.table):
                        logger.info(f"处理文章表数据: 表名={event.table}")
                        if "id" in data:
                            self.es_client.delete(index=self.es_index, id=str(data["id"]))
                            logger.info(f"从ES中删除文章: id={data['id']}")
                except Exception as e:
                    logger.error(f"处理DELETE事件行时出错: {str(e)}")
        except Exception as e:
            logger.error(f"处理DELETE事件时出错: {str(e)}")

    def _sync_article(self, data):
        """Create or update the Elasticsearch document for one article row.

        Args:
            data: dict of column name -> value for the row.  Rows decoded
                without schema access arrive with UNKNOWN_COL0, UNKNOWN_COL1,
                ... keys and are mapped positionally (id, title, content).
        """
        if "id" not in data:
            if any(k.startswith('UNKNOWN_COL') for k in data.keys()):
                logger.info("检测到UNKNOWN_COL格式的数据，尝试转换处理")
                # Positional fallback: assume column order id, title, content.
                if len(data) >= 2:  # need at least id and title
                    keys = list(data.keys())
                    id_value = data[keys[0]]
                    title_value = data[keys[1]]
                    logger.info(f"列映射结果: id={id_value}, title={title_value}")

                    doc = {
                        "id": id_value,
                        "title": title_value,
                        "content": data.get(keys[2], "") if len(keys) > 2 else "",
                    }

                    self.es_client.index(index=self.es_index, id=str(id_value), body=doc)
            else:
                # BUGFIX: the original passed `data` as a %-format argument
                # with no placeholder, which raised inside logging instead of
                # logging the payload.
                logger.error("数据表字段找不到ID字段 'id': %s", data)
            return

        # Create or update the document keyed by the row id.
        doc = self._article_doc(data)
        self.es_client.index(index=self.es_index, id=str(data["id"]), body=doc)

    @staticmethod
    def _article_doc(data):
        """Build the ES document body from a row dict; requires data["id"].

        Shared by _sync_article() and rebuild_index() so the field list is
        defined in exactly one place.
        """
        return {
            "id": data["id"],
            "title": data.get("title", ""),
            "content": data.get("content", ""),
            "author": data.get("author", ""),
            "department_name": data.get("department_name", ""),
            "tags": data.get("tags", ""),
            "status": data.get("status", ""),
            "view_count": data.get("view_count", 0),
            "like_count": data.get("like_count", 0),
            "created_at": data.get("created_at"),
            "updated_at": data.get("updated_at")
        }

    def rebuild_index(self):
        """Drop, recreate and repopulate the ES index from MySQL.

        Returns:
            tuple[bool, str]: (success flag, human-readable message).
        """
        try:
            # Make sure we have an ES client.
            if not self.es_client:
                if not self.connect_es():
                    return False, "无法连接到Elasticsearch"

            # Drop the existing index, if any.
            if self.es_client.indices.exists(index=self.es_index):
                self.es_client.indices.delete(index=self.es_index)
                logger.info(f"已删除现有索引: {self.es_index}")

            # Single-shard, no-replica index suitable for a small dataset.
            index_settings = {
                "settings": {"number_of_shards": 1, "number_of_replicas": 0},
                "mappings": {
                    "properties": {
                        "id": {"type": "keyword"},
                        "title": {"type": "text", "analyzer": "standard"},
                        "content": {"type": "text", "analyzer": "standard"},
                        "author": {"type": "text"},
                        "department_name": {"type": "keyword"},
                        "tags": {"type": "text", "analyzer": "standard"},
                        "status": {"type": "keyword"},
                        "view_count": {"type": "integer"},
                        "like_count": {"type": "integer"},
                        "created_at": {"type": "date"},
                        "updated_at": {"type": "date"}
                    }
                }
            }

            self.es_client.indices.create(index=self.es_index, body=index_settings)
            logger.info(f"已创建新索引: {self.es_index}")

            # Load every article from MySQL.  The original returned from
            # inside the `with` block on an empty table, leaking the
            # connection; the try/finally guarantees it is closed.
            import pymysql
            conn = pymysql.connect(**self.mysql_settings)
            try:
                with conn.cursor(pymysql.cursors.DictCursor) as cursor:
                    cursor.execute("SELECT * FROM medical_article")
                    articles = cursor.fetchall()
            finally:
                conn.close()

            if not articles:
                logger.info("没有找到文章数据")
                return True, "索引已重建，但没有找到文章数据"

            # Assemble one bulk request: action line + document per article.
            bulk_data = []
            for article in articles:
                bulk_data.append(
                    {"index": {"_index": self.es_index, "_id": str(article["id"])}}
                )
                bulk_data.append(self._article_doc(article))

            self.es_client.bulk(body=bulk_data, refresh=True)
            logger.info(f"已将 {len(articles)} 篇文章导入到ES索引")

            return True, f"ES索引重建成功，已导入 {len(articles)} 篇文章"

        except Exception as e:
            logger.error(f"重建ES索引时出错: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            return False, f"重建ES索引时出错: {str(e)}"