from typing import Optional, Dict, Any, List
from sqlalchemy import select, and_, func
from src.models.works import Works
from src.common.utils.data_parser import DataParser
from src.common.db.database import get_db_session
from loguru import logger

class WorksService:
    """Service layer for persisting and querying Douyin works.

    All methods open their own async DB session via ``get_db_session`` and
    swallow exceptions at this boundary, logging the error and returning a
    neutral value (``None`` / empty page) instead of raising.
    """

    async def save_douyin_work(self, response_data: str, source_url: str) -> Optional[Works]:
        """Parse a Coze API response and upsert the resulting Douyin work.

        Args:
            response_data: Raw Coze API response payload.
            source_url: Original request URL; also used as the dedup key.

        Returns:
            The persisted (created or updated) Works instance, or None on
            parse/DB failure.
        """
        try:
            # Parse the raw payload into a dict of Works column values.
            parsed_data = DataParser.parse_douyin_data(response_data, source_url)
            if not parsed_data:
                logger.error("数据解析失败")
                return None

            # Upsert semantics: a work with the same source link is updated
            # in place rather than duplicated.
            existing_work = await self.find_by_source_link(source_url)
            if existing_work:
                logger.info(f"作品已存在，更新数据: {existing_work.id}")
                return await self.update_work(existing_work.id, parsed_data)

            # No existing row: insert a new work.
            async with get_db_session() as session:
                work = Works(**parsed_data)
                session.add(work)
                await session.commit()
                # Refresh to populate DB-generated fields (id, timestamps).
                await session.refresh(work)

                logger.info(f"成功保存作品: {work.id} - {work.title}")
                return work

        except Exception as e:
            logger.error(f"保存抖音作品失败: {str(e)}")
            return None

    async def find_by_source_link(self, source_link: str) -> Optional[Works]:
        """Look up the active work whose source link matches exactly.

        Args:
            source_link: Source URL to match against ``Works.source_link``.

        Returns:
            The matching Works instance, or None if absent or on DB error.
            Only rows with ``status == 1`` (active) are considered.
        """
        try:
            async with get_db_session() as session:
                stmt = select(Works).where(
                    and_(
                        Works.source_link == source_link,
                        Works.status == 1,
                    )
                )
                result = await session.execute(stmt)
                # scalar_one_or_none: None when no row; raises if the link
                # is duplicated, surfacing data-integrity problems early.
                return result.scalar_one_or_none()

        except Exception as e:
            logger.error(f"查找作品失败: {str(e)}")
            return None

    async def update_work(self, work_id: int, update_data: Dict[str, Any]) -> Optional[Works]:
        """Update an existing work's attributes from a dict.

        Args:
            work_id: Primary key of the work to update.
            update_data: Mapping of attribute name -> new value. Keys that
                are not Works attributes are silently ignored.

        Returns:
            The refreshed Works instance, or None if the work does not
            exist or the update failed.
        """
        try:
            async with get_db_session() as session:
                stmt = select(Works).where(Works.id == work_id)
                result = await session.execute(stmt)
                work = result.scalar_one_or_none()

                if not work:
                    logger.warning(f"作品不存在: {work_id}")
                    return None

                # Apply known attributes only; never let the payload rewrite
                # the primary key of the row being updated.
                for key, value in update_data.items():
                    if key != 'id' and hasattr(work, key):
                        setattr(work, key, value)

                await session.commit()
                await session.refresh(work)

                logger.info(f"成功更新作品: {work_id}")
                return work

        except Exception as e:
            logger.error(f"更新作品失败: {str(e)}")
            return None

    async def get_works_list(self, page: int = 1, page_size: int = 10) -> Dict[str, Any]:
        """Return one page of active works, newest first.

        Args:
            page: 1-based page number; values below 1 are clamped to 1.
            page_size: Items per page; values below 1 are clamped to 1.

        Returns:
            Dict with keys ``list`` (serialized works), ``total``, ``page``,
            ``page_size`` and ``total_pages``. On DB error an empty page is
            returned instead of raising.
        """
        # Clamp inputs so a bad caller cannot produce a negative OFFSET
        # or a ZeroDivisionError in the total_pages calculation.
        page = max(page, 1)
        page_size = max(page_size, 1)

        try:
            async with get_db_session() as session:
                offset = (page - 1) * page_size

                # Total count of active rows; guard against a None scalar.
                count_stmt = select(func.count(Works.id)).where(Works.status == 1)
                count_result = await session.execute(count_stmt)
                total = count_result.scalar() or 0

                # Page of rows, newest first.
                list_stmt = (
                    select(Works)
                    .where(Works.status == 1)
                    .order_by(Works.create_time.desc())
                    .offset(offset)
                    .limit(page_size)
                )
                list_result = await session.execute(list_stmt)
                works_data = [work.to_dict() for work in list_result.scalars().all()]

                logger.info(f"成功获取作品列表: page={page}, page_size={page_size}, total={total}")

                return {
                    'list': works_data,
                    'total': total,
                    'page': page,
                    'page_size': page_size,
                    # Ceiling division without importing math.
                    'total_pages': (total + page_size - 1) // page_size,
                }

        except Exception as e:
            logger.error(f"获取作品列表失败: {str(e)}")
            return {
                'list': [],
                'total': 0,
                'page': page,
                'page_size': page_size,
                'total_pages': 0,
            }