from fastapi import APIRouter,Depends,Request,HTTPException, status, Query
from novelapp.config.database import connect
from mongoengine.queryset.visitor import Q
from novelapp.api.auth import get_user
from novelapp.models.models import Books,Comments,User,Collect
from novelapp.common.common import returnData,setLog,check_sensitive_words
from novelapp.api.quene_task import scrape_reviews_task_shuku,fetch_bookInfo_from_api
from pydantic import BaseModel
from datetime import datetime, timedelta
from typing import Optional, List
from tqdm import tqdm
from bson import ObjectId
from mongoengine.errors import ValidationError

import aiohttp
import asyncio
import json,httpx,time



"""  
对查询集进行分页  
"""  
def paginate_query(query_set, page: int = 1, page_size: int = 10):  
    skip = (page - 1) * page_size  
    return query_set.skip(skip).limit(page_size) 
    

# Async helper for calling external HTTP APIs.
async def fetch_external_api(url: str, params: dict) -> dict:
    """GET `url` with `params` and return the parsed JSON body.

    Raises httpx.HTTPStatusError for non-2xx responses.
    """
    async with httpx.AsyncClient() as http_client:
        result = await http_client.get(url, params=params)
        # Surface HTTP errors as exceptions instead of returning bad data.
        result.raise_for_status()
        payload = result.json()
    return payload


# Module-level logger; NOTE(review): 'noveapp.log' looks like a typo for
# 'novelapp.log', but renaming it would move the existing log file — confirm.
logger = setLog('noveapp.log')
    
# Router mounted by the application for all book-related endpoints.
router = APIRouter()  

@router.get("/get_list/")
async def get_list(book_type: Optional[str] = Query(None, description="Author to search for"),
    value: Optional[str] = Query(None, description="Author to search for"),
    sort: Optional[str] = Query(None),
    tags: Optional[str] = Query(None),
    countWord: Optional[int] = Query(None),
    all: Optional[int] = Query(None),
    page: int = Query(1, gt=0, description="Page number"),
    page_size: int = Query(10, gt=0, le=100, description="Number of items per page")
):
    # 构建查询条件
    query = Q()
    
    # 检查是否有任何查询条件
    has_filters = any([value, sort, tags, countWord,all])
    
    if not has_filters:
        # 没有查询条件时，随机获取评分大于8分的书籍
        query &= Q(score__gte=7.6)
        # 使用 aggregate 实现随机排序和分页
        pipeline = [
            {'$match': {'score': {'$gte': 7.6}}},
            {'$sample': {'size': 200}},  # 先随机选择一个较大的池子
            {'$skip': (page - 1) * page_size},
            {'$limit': page_size}
        ]
        books = Books.objects.aggregate(pipeline)
        books_list = []
        for book in books:
            book['id'] = str(book['_id'])
            del book['_id']
            books_list.append(book)
            
        total_count = Books.objects(score__gte=8.0).count()
        return returnData(200, '查询成功', {
            "page": page,
            "page_size": page_size,
            "total_count": total_count,
            "books": books_list
        })
    
    if value:
        # 支持书名、作者、标签的模糊查询
        query &= (
            Q(title__icontains=value) |  # 书名模糊查询
            Q(author=value) |           # 作者精确查询
            Q(tags__in=[value])        # 标签数组包含查询
        )
    # if book_type == 'title' and value:
    #     query &= Q(title__icontains=value)
    # elif book_type == 'author' and value:
    #     query &= Q(author__icontains=value)
    # elif book_type == 'tag' and value:
    #     query &= Q(tags__icontains=value)
            
    # 新增条件
    if countWord == 1:
        query &= Q(countWord__lt=1000000)  # 小于100万
    elif countWord == 2:
        query &= Q(countWord__gte=1000000, countWord__lt=3000000)  # 在100万到300万之间
    elif countWord == 3:
        query &= Q(countWord__gte=3000000)  # 300万以上
        
    if tags:
        query &= Q(tags__icontains=tags)  # 标签模糊查询
    
            
    # 计算分页参数
    skip = (page - 1) * page_size
    limit = page_size
    
    # 执行查询
    books = Books.objects(query)

    if sort == 'countWord':
        books = books.order_by('-countWord')  # 倒序排列
    elif sort == 'score':
        books = books.order_by('-score')  # 倒序排列
    elif sort == 'scorer':
        books = books.order_by('-scorerCount')  # 正序排列
    elif sort == 'updateAt':
        books = books.order_by('-updateAt')  # 按更新时间倒序排列
    
    # 添加分页
    books = books.skip(skip).limit(limit)
    
    # 转换为列表
    # books_list = [book.to_dict() for book in books]
    # books_list = [json.loads(book.to_json()) for book in books]
    books_list = []
    for book in books:
        books_data = json.loads(book.to_json())
        books_data['id'] = str(book.id)
        books_list.append(books_data)


    total_count = Books.objects(query).count()
    return returnData(200, '查询成功', {
        "page": page,
        "page_size": page_size,
        "total_count": total_count,
        "books": books_list
    })


@router.get("/get_book_by_ysbookid/")
async def get_book_by_ysbookid(ysbookId: int = Query(..., description="优书网书籍ID")):
    """
    根据ysbookId查询书籍，返回书籍ID
    
    参数:
        - ysbookId: 优书网书籍ID（对应龙空书单的bid）
        
    返回:
        - id: 数据库MongoDB的_id
        - 其他书籍基本信息
    """
    try:
        book = Books.objects(ysbookId=ysbookId).first()
        
        if not book:
            return returnData(404, '书库中没有该书籍', None)
        
        book_info = book.get_book_info()
        return returnData(200, '查询成功', book_info)
        
    except Exception as e:
        logger.error(f"Query book by ysbookId error: {e}")
        return returnData(500, '查询失败', None)


@router.get("/get_detail/")
async def get_detail(book_id: Optional[str] = Query(None, description="Author to search for"), user: Optional[str] = Depends(get_user)):
    if not book_id:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="信息有误")     
        
    # BooksDetail = Books.objects(id=book_id).first().get_book_info()        
    BooksDetail = Books.objects.get(id=ObjectId(book_id))
    if not BooksDetail:
        return returnData(401, '暂无信息', None)
    # 检查当前书籍是否被用户收藏
    if user:
        is_collected = Collect.objects(user=user.id, book=BooksDetail.id).first() is not None
    else:
        is_collected = False
    # 在后台异步执行更新列表
    # 当前时间
    current_time = datetime.now()
    
    # 检查更新时间
    # logger.error(f"检查更新评论  {book_id}")
    # logger.error(f"检查更新评论  {BooksDetail.lastUpdated + timedelta(hours=24)}-- {current_time}")
    if BooksDetail.lastUpdated + timedelta(hours=24) < current_time  and  BooksDetail.ysbookId > 0:
        logger.error(f"准备更新评论  {BooksDetail.ysbookId}")
        asyncio.create_task(scrape_reviews_task_shuku(BooksDetail.ysbookId))
        
        logger.error(f"准备更新书籍信息  {BooksDetail.ysbookId}")
        book_info_result = await fetch_bookInfo_from_api(BooksDetail.ysbookId)
        
        if book_info_result:
            link = book_info_result["data"]["bookConfig"]["link"]
            wordCount = book_info_result["data"]["bookInfo"]["wordCount"]
            finished = book_info_result["data"]["bookInfo"]["finished"]
            intro = book_info_result["data"]["bookInfo"]["intro"]
            # updateAt = book_info_result["data"]["bookInfo"]["intro"]
            
            BooksDetail.link = link
            BooksDetail.countWord = wordCount
            BooksDetail.status =  1 if finished else 0
            BooksDetail.intro = intro    
            # BooksDetail.updateAt = updateAt    
                    
                        
        
        BooksDetail.lastUpdated = current_time
        BooksDetail.save()
    books_detail_info = BooksDetail.get_book_info()
    return returnData(200, '查询成功', {
        "BooksDetail": books_detail_info,
        "is_collected": is_collected
    })


class toContent(BaseModel):
    """Request body for /to_comment/: target book, score and review text."""

    book_id: str
    rating: float
    content: str
@router.post("/to_comment/")
async def to_comment(reqs: toContent, user: User = Depends(get_user)):
    book_id_str = reqs.book_id
    rating = reqs.rating
    content = reqs.content

    has_sensitive_words, word = check_sensitive_words(content)
    if has_sensitive_words:
        return returnData(400, f"评论给内容包含敏感词：{word}", {word})
        

    try:
        # 检查book_id是否有效
        book = Books.objects(id=book_id_str).first()
        if not book:
            return returnData(400, '无效的书籍ID', {})
        

        # 将书籍添加到书单中
        # 创建评论并保存
        comment = Comments(
            book_id=book,
            ysbookId=book.ysbookId,
            user_id=user.id,
            username=user.nickname,
            content=content,
            rating=rating,
            ismiao=1  # 默认值为1
        )
        comment.content = content.replace('\n', '\r\n')
        comment.save()


        return returnData(200, '操作成功', {})

    except ValidationError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Error in to_shudan: {e}")
        raise HTTPException(status_code=500, detail="内部服务器错误")



# 评论列表
@router.get("/comments_list/")
async def comments_list(order_type: Optional[str] = Query(None, description="Author to search for"),    
    book_id: Optional[str] = Query(None, description="Author to search for"),
    page: int = Query(1, gt=0, description="Page number"),
    page_size: int = Query(10, gt=0, le=100, description="Number of items per page")):
       
        
        
        
    # 构建查询条件
    query = Q()
    # 计算分页参数
    skip = (page - 1) * page_size
    limit = page_size
    
    
    if book_id:
        query &= Q(book_id=book_id)
        query &= Q(display=1)
    # 过审    
        # query &= Q(display=2)
    # 根据 order_type 进行不同的排序
    if order_type == '1':
        Comments_list = Comments.objects(query).order_by('-rating').skip((page - 1) * page_size).limit(page_size)
    elif order_type == '2':
        Comments_list = Comments.objects(query).order_by('-upvotes').skip((page - 1) * page_size).limit(page_size)
    elif order_type == '3':
        Comments_list = Comments.objects(query).order_by('-createdAt').skip((page - 1) * page_size).limit(page_size)
    else:
        Comments_list = Comments.objects(query).skip((page - 1) * page_size).limit(page_size)
    
    # 转换为列表
    # books_list = [book.to_dict() for book in books]
    comments_list = []
    for comment in Comments_list:
        comment_data = json.loads(comment.to_json())
        comment_data['id'] = str(comment.id)
        comment_data['book_id'] = str(comment.book_id.id) if comment.book_id else None
        comment_data['createdAt'] = comment.createdAt.strftime('%Y-%m-%d %H:%M:%S')
        comments_list.append(comment_data)

    total_count = Comments.objects(query).count()
    
        

    return returnData(200, '查询成功', {
        "page": page,
        "page_size": page_size,
        "total_count": total_count,
        "comments": comments_list
    })
