#!/usr/bin/env python3
"""
MySQL到Elasticsearch数据同步服务
支持全量同步和增量同步
"""

import asyncio
import json
import math
from datetime import datetime, timedelta
from typing import Any, Dict, List

from elasticsearch import Elasticsearch
from sqlalchemy.orm import Session

from models import get_db, Content, ContentTag, ContentMedia, User, Tag
from services.es_index_manager import ESIndexManager

class DataSyncService:
    """Synchronize MySQL content rows into the Elasticsearch index.

    Provides single-document sync, bulk sync of all published content,
    incremental sync of recently-updated content, and deletion.
    """

    # Content.status value that marks a row as published.
    PUBLISHED_STATUS = 2

    def __init__(self, es_host="http://localhost:9200"):
        """Bind the service to the shared ES index manager.

        Args:
            es_host: Elasticsearch endpoint URL.
        """
        self.es_manager = ESIndexManager(es_host)
        self.es = self.es_manager.es
        self.index_name = self.es_manager.index_name

    def build_content_document(self, content: "Content", db: "Session") -> Dict[str, Any]:
        """Build the ES document for one Content row.

        Loads the row's tags and media from the database and combines
        them with engagement counters and computed ranking scores into
        a flat dict matching the index mapping.

        Args:
            content: the Content ORM row to index.
            db: an open SQLAlchemy session.

        Returns:
            The document dict to send to Elasticsearch.
        """
        # Resolve tag ids, then their display names.
        content_tags = db.query(ContentTag).filter(
            ContentTag.content_id == content.id
        ).all()
        tag_ids = [ct.tag_id for ct in content_tags]
        tag_names = []
        if tag_ids:
            tags = db.query(Tag).filter(Tag.id.in_(tag_ids)).all()
            tag_names = [tag.name for tag in tags]

        # Media files in their explicit display order.
        media_files = db.query(ContentMedia).filter(
            ContentMedia.content_id == content.id
        ).order_by(ContentMedia.sort_order).all()

        media_urls = [media.media_url for media in media_files]
        media_types = [media.media_type for media in media_files]

        # Cover image: the image whose sort_order is exactly 1.
        # NOTE(review): assumes sort_order is 1-based; if it can start at 0
        # the cover is silently missed — confirm against the writing side.
        first_image_url = next(
            (m.media_url for m in media_files
             if m.media_type == "image" and m.sort_order == 1),
            None,
        )

        hot_score = self._calculate_hot_score(content)
        time_score = self._calculate_time_score(content.published_at)

        return {
            "content_id": content.id,
            "title": content.title,
            "description": content.description or "",
            "author_id": content.author_id,
            "author_nickname": content.author.nickname if content.author else "",
            "tag_ids": tag_ids,
            "tag_names": tag_names,
            "status": content.status,
            "created_at": content.created_at.isoformat() if content.created_at else None,
            "published_at": content.published_at.isoformat() if content.published_at else None,
            "view_count": content.view_count or 0,
            "like_count": content.like_count or 0,
            "comment_count": content.comment_count or 0,
            "share_count": content.share_count or 0,
            "collect_count": content.collect_count or 0,
            "media_urls": media_urls,
            "media_types": media_types,
            "first_image_url": first_image_url,
            "hot_score": hot_score,
            "time_score": time_score,
            "is_top": content.is_top or False
        }

    def _calculate_hot_score(self, content: "Content") -> float:
        """Compute an engagement ("hot") score for a content row.

        Each counter is log1p-scaled so extreme values do not dominate,
        then weighted: comments 0.4 > likes 0.3 > shares 0.2 > views 0.1.
        Missing (None) counters count as 0.
        """
        view_score = math.log1p(content.view_count or 0) * 0.1
        like_score = math.log1p(content.like_count or 0) * 0.3
        comment_score = math.log1p(content.comment_count or 0) * 0.4
        share_score = math.log1p(content.share_count or 0) * 0.2

        return view_score + like_score + comment_score + share_score

    def _calculate_time_score(self, published_at: datetime) -> float:
        """Compute a monotonically decaying freshness score.

        Piecewise-linear decay: 1.0 for the first 7 days, then -0.02/day
        until day 30 (reaching 0.34), then -0.01/day with a floor of 0.1.
        Returns 0.0 for unpublished content (no timestamp).

        Bug fix: the >30-day branch previously restarted from a 0.5 base,
        so a 31-day-old post scored 0.49 — HIGHER than a 30-day-old post's
        0.34. The tail now continues from 0.34, keeping decay monotonic.
        """
        if not published_at:
            return 0.0

        # utcnow() keeps the naive-UTC convention of the stored datetimes;
        # mixing in an aware datetime would make the subtraction raise.
        days_ago = (datetime.utcnow() - published_at).days

        if days_ago <= 7:
            return 1.0
        if days_ago <= 30:
            return 0.8 - (days_ago - 7) * 0.02
        return max(0.1, 0.34 - (days_ago - 30) * 0.01)

    async def sync_single_content(self, content_id: int) -> bool:
        """Index (create or overwrite) one content document in ES.

        Returns True on success, False if the row is missing or the
        indexing call fails.
        """
        db = next(get_db())
        try:
            content = db.query(Content).filter(Content.id == content_id).first()
            if not content:
                print(f"内容 {content_id} 不存在")
                return False

            doc = self.build_content_document(content, db)

            # Upsert by id: index() replaces any existing document.
            self.es.index(
                index=self.index_name,
                id=content_id,
                body=doc
            )

            print(f"内容 {content_id} 同步成功")
            return True

        except Exception as e:
            print(f"同步内容 {content_id} 失败: {e}")
            return False
        finally:
            db.close()

    async def sync_published_contents(self, limit: int = 1000) -> bool:
        """Bulk-sync up to `limit` published contents into ES.

        Returns True on success (including when nothing needs syncing),
        False on any failure. Previously this returned None when there
        was no published content, and silently ignored per-item bulk
        failures; both are fixed here.
        """
        db = next(get_db())
        try:
            contents = db.query(Content).filter(
                Content.status == self.PUBLISHED_STATUS
            ).limit(limit).all()

            print(f"开始同步 {len(contents)} 条已发布内容")

            if not contents:
                # Nothing to do is still a successful sync.
                return True

            # Interleave action and document lines as the bulk API expects.
            bulk_data = []
            for content in contents:
                doc = self.build_content_document(content, db)
                bulk_data.append({
                    "index": {
                        "_index": self.index_name,
                        "_id": content.id
                    }
                })
                bulk_data.append(doc)

            response = self.es.bulk(body=bulk_data)

            # bulk() returns 200 even when individual items fail, so the
            # per-item "errors" flag must be checked explicitly.
            if response.get("errors"):
                failed_ids = [
                    item.get("index", {}).get("_id")
                    for item in response.get("items", [])
                    if item.get("index", {}).get("error")
                ]
                print(f"批量同步失败: {failed_ids}")
                return False

            print(f"批量同步完成，处理了 {len(contents)} 条内容")
            return True

        except Exception as e:
            print(f"批量同步失败: {e}")
            return False
        finally:
            db.close()

    async def sync_incremental(self, hours: int = 1) -> bool:
        """Re-sync published content updated within the last N hours."""
        db = next(get_db())
        try:
            since_time = datetime.utcnow() - timedelta(hours=hours)

            contents = db.query(Content).filter(
                Content.status == self.PUBLISHED_STATUS,
                Content.updated_at >= since_time
            ).all()

            print(f"增量同步 {len(contents)} 条内容")

            # One document at a time: incremental batches are small and
            # this reuses the single-document error handling.
            for content in contents:
                await self.sync_single_content(content.id)

            return True

        except Exception as e:
            print(f"增量同步失败: {e}")
            return False
        finally:
            db.close()

    def delete_content(self, content_id: int) -> bool:
        """Remove a content document from ES.

        Returns True on success, False on failure (including when the
        document does not exist — the client raises NotFound, which is
        caught here as a generic failure).
        """
        try:
            self.es.delete(
                index=self.index_name,
                id=content_id
            )
            print(f"内容 {content_id} 从ES中删除成功")
            return True
        except Exception as e:
            print(f"删除内容 {content_id} 失败: {e}")
            return False

# Scheduled synchronization entry points.
class SyncScheduler:
    """Drives DataSyncService runs (full and incremental passes)."""

    def __init__(self):
        # One shared service instance for all scheduled runs.
        self.sync_service = DataSyncService()

    async def full_sync_task(self):
        """Run one full sync pass over all published content."""
        print("开始全量同步...")
        await self.sync_service.sync_published_contents()
        print("全量同步完成")

    async def incremental_sync_task(self):
        """Run one incremental pass over the last hour of updates."""
        print("开始增量同步...")
        await self.sync_service.sync_incremental(hours=1)
        print("增量同步完成")

if __name__ == "__main__":
    # 测试数据同步
    async def test_sync():
        sync_service = DataSyncService()
        await sync_service.sync_published_contents(limit=10)
    
    asyncio.run(test_sync())
