from fastapi import APIRouter, Depends, Request, HTTPException, status, Query, BackgroundTasks
from novelapp.config.database import connect
from novelapp.models.models import Books, Comments
from pydantic import BaseModel
from novelapp.common.common import returnData,setLog
import json, httpx, time, requests, re
from datetime import datetime
from bs4 import BeautifulSoup
import aiohttp
import asyncio
import logging
import random
from aiohttp import web


router = APIRouter()

# One log file per calendar day, e.g. longkong_log_2024-01-01.log
today = datetime.now().strftime('%Y-%m-%d')
log_file = f'longkong_log_{today}.log'
logger = setLog(log_file)

# NOTE(review): module-level `tasks` appears unused — startup_event defines a
# local `tasks` list that shadows it; confirm before removing.
tasks = {}
# Threshold for batching database inserts
BATCH_SIZE = 60

# Work queues shared by the producer/worker coroutines below
queue1 = asyncio.Queue()
data_queue = asyncio.Queue()

# Pool of common browser User-Agent strings; one is chosen at random per
# request so the scraper's traffic looks less uniform.
user_agents = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Safari/605.1.15",
]
# Session cookie sent with every API request; the line below is an older
# cookie value kept for reference.
# Hm_lvt_08d61b09ba0306dd35d682cf285eb4e5=1747296063,1749786036; EGG_SESS=9hE6_CAfdET4Sp91YibMSC6bcb7eTurnX0i5lBHY1RnLDReWlOJnorFN26L4EIGvj3mZP2nBIVgWn_8zaN662K3LjrMmxrFUa65hFH6Iup4EVgL6wyj9UYM0DVD4GFEzPtsersRCCitT-QIIFetutjHCSukZQrXQhxVhwREQBTTw8m9ZYFXxCkxeYJVlCv0F
cookies_value = "EGG_SESS=qIOxTFmy7ETN9HKDjjgn5RN-SUjM7N7nRTCgRfiyyt-CkrKs6OiOuBOk_FeWy-Y2JuYxQtMfzOzg0gKFjCuPDqwsjeP0pzekb3DYJpy32715ucb1BOuHl5JuU8rrqcP_jedKi_LLyV3kzX408BPmPGXiPXd7HwlLgYmCvyIwwPWu7hyVGsz-u3_5GTWVDc2p"


async def fetch_reviews_from_api(bid, page, session):
    """Fetch one page of book reviews from the lkong GraphQL API.

    Args:
        bid: Book id to query.
        page: 1-based page number.
        session: Open aiohttp.ClientSession to issue the request on.

    Returns:
        Tuple (has_more, reviews): whether further pages exist and the list
        of review dicts on this page. Returns (False, []) on any request or
        response-shape error so callers simply stop paginating.
    """
    url = "https://api.lkong.com/api"
    data = {
        "variables": {
            "bid": bid,
            "page": page,
            "action": ""
        },
        "query": "query ($bid: Int!, $page: Int!, $action: String!) {\n  bookReviews(bid: $bid, page: $page, action: $action) {\n    hasMore\n    data {\n      rid\n      bid\n      author {\n        uid\n        name\n        avatar\n        __typename\n      }\n      score\n      content\n      createTime\n      updateTime\n      likes\n      liked {\n        up\n        down\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}"
    }
    # Randomize User-Agent and Accept-Language per request
    headers = {
        "User-Agent": random.choice(user_agents),
        "Accept-Language": random.choice(["en-US", "en-GB", "zh-CN"]),
        "Cookie": cookies_value
    }
    try:
        async with session.post(url, json=data, headers=headers, ssl=False) as response:
            response.raise_for_status()
            result = await response.json()
            return result['data']['bookReviews']['hasMore'], result['data']['bookReviews']['data']
    except (aiohttp.ClientError, asyncio.TimeoutError, KeyError, TypeError) as e:
        # KeyError/TypeError cover GraphQL error payloads that lack the
        # expected 'data.bookReviews' shape; previously those exceptions
        # escaped and crashed the calling task.
        logger.error(f"Error fetching from API for bid {bid}, page {page}: {e}")
        return False, []

# Fetch detailed book information (config, info, rating counts) for one book.
async def fetch_bookInfo_from_api(bid):
    """Query the lkong GraphQL API for a book's config, info and rating data.

    Opens and closes its own aiohttp session.

    Args:
        bid: Book id to query.

    Returns:
        The raw parsed JSON response dict, or None when the request fails.
    """
    url = "https://api.lkong.com/api"
    data = {
        "variables": {
            "bid": bid,
        },
        "query":"query ($bid: Int!) {\n  bookConfig(bid: $bid) {\n    bid\n    banned\n    moveTo\n    link\n    __typename\n  }\n  bookInfo(bid: $bid) {\n    bid\n    title\n    author\n    cover\n    wordCount\n    finished\n    tags\n    intro\n    __typename\n  }\n  bookCount(bid: $bid) {\n    score\n    rateCount\n    __typename\n  }\n  bookRatePercent(bid: $bid)\n}"
    }
    # Randomize User-Agent and Accept-Language per request
    headers = {
        "User-Agent": random.choice(user_agents),
        "Accept-Language": random.choice(["en-US", "en-GB", "zh-CN"]),
        "Cookie": cookies_value
    }
    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(url, json=data, headers=headers, ssl=False) as response:
                response.raise_for_status()
                result = await response.json()
                return result
        except aiohttp.ClientError as e:
            # Use the module logger (not print) so failures are recorded in
            # the daily log file, consistent with the rest of this module.
            logger.error(f"Error fetching from API for bid {bid}: {e}")
            return None
        

async def scrape_reviews_task_shuku(book_id):
    """Scrape and store all reviews for *book_id*, managing its own session.

    Thin wrapper around scrape_reviews_task for callers that do not already
    hold an aiohttp.ClientSession. Replaces a ~50-line verbatim copy of that
    function's body, so dedup/pagination fixes only need to happen in one
    place.
    """
    async with aiohttp.ClientSession() as session:
        await scrape_reviews_task(book_id, session)


async def scrape_reviews_task(book_id, session):
    """Scrape every review page for *book_id* and insert new comments.

    Looks the book up by its lkong id (ysbookId), then pages through the
    review API until it reports no more pages, skipping reviews whose
    (book_id, createdAt) pair already exists in the Comments collection.
    *session* must be an open aiohttp.ClientSession owned by the caller.
    """
    page = 1
    total_comments = 0  # running count of newly inserted comments
    bookInfo = Books.objects(ysbookId=book_id).first()

    if not bookInfo:
        logger.warning(f"Book ID {book_id} not found.")
        return
    ysbookId = bookInfo.ysbookId
    
    # Comments.objects(ysbookId=book_id).delete()
    while True:
        # Randomize request timing to avoid a detectable fixed cadence
        await asyncio.sleep(random.uniform(0.1, 2))
        has_more, reviews = await fetch_reviews_from_api(book_id, page, session)
        
        if not reviews:
            break
        
        comments_to_save = []
        for review in reviews:
            try:
                # review['content'] is JSON rich text: a list of block nodes,
                # each with 'children' holding text fragments.
                content_data = json.loads(review['content'])
                text_parts = [child['text'] for part in content_data for child in part['children']]
                content_text = "<br/>".join(text_parts)  # Join text fragments with <br/>
                
                create_time_ms = review['createTime']
                create_time_dt = datetime.fromtimestamp(create_time_ms / 1000.0)  # API timestamps are in milliseconds
                formatted_create_time = create_time_dt.strftime('%Y-%m-%d %H:%M:%S.000')  # Format to the stored string form
                
                
                comment = Comments(
                    book_id=bookInfo.id,
                    ysbookId=book_id,
                    username=review['author']['name'],
                    createdAt=formatted_create_time,
                    rating=review['score'],
                    content=content_text,
                    upvotes=review['likes']
                )
                # Deduplicate on (book_id, createdAt) before queueing for insert
                bc = Comments.objects(book_id=comment.book_id, createdAt=comment.createdAt).first()
                if not bc:
                    comments_to_save.append(comment)
                    total_comments += 1  # one more new comment queued
            except Exception as e:
                logger.error(f"Error processing review: {e}")
        if comments_to_save:
            
            Comments.objects.insert(comments_to_save)
        if not has_more:
            break
        page += 1
    logger.info(f"{ysbookId} 入库成功.{total_comments} 条评论.")



async def worker(queue, session):
    """Consume book ids from *queue* until a None sentinel arrives.

    Each id is handed to scrape_reviews_task on the shared *session*;
    failures are logged and every consumed id is acknowledged with
    task_done(). The sentinel itself is not acknowledged.
    """
    while (next_id := await queue.get()) is not None:
        try:
            await scrape_reviews_task(next_id, session)
        except Exception as err:
            logger.error(f"Error in worker for book_id {next_id}: {err}")
        finally:
            queue.task_done()

async def producer(start, end, queue):
    """Feed every book id in the half-open range [start, end) into *queue*."""
    for next_id in range(start, end):
        await queue.put(next_id)


async def startup_event():
    """Run the full review-scraping pipeline for the hard-coded bid range.

    Spawns two workers over the shared queue1, feeds them book ids, then
    shuts them down cleanly.
    """
    worker_count = 2
    async with aiohttp.ClientSession() as session:
        # Renamed from `tasks` to avoid shadowing the module-level dict.
        workers = [asyncio.create_task(worker(queue1, session)) for _ in range(worker_count)]
        # 1-136730 has no pagination
        await producer(324213, 325824, queue1)
        # BUG FIX: the workers loop until they receive a None sentinel, but
        # none was ever enqueued, so gather() blocked forever once the queue
        # drained and this coroutine (and the HTTP request) never returned.
        for _ in range(worker_count):
            await queue1.put(None)
        await asyncio.gather(*workers)
        
        

@router.get("/start-scraping")
async def start_scraping():
    # Runs the whole pipeline inline: the HTTP request blocks until
    # startup_event finishes. NOTE(review): consider BackgroundTasks for
    # long-running scrapes.
    await startup_event()
    return {"status": "scraping started"}
    
    
    
@router.get("/test_b")
async def test_b(book_id: int):
    """Debug endpoint: scrape reviews for a single book and report the outcome."""
    async with aiohttp.ClientSession() as http_session:
        try:
            scraped = await scrape_reviews_task(book_id, http_session)
        except Exception as exc:
            logger.error(f"Error in worker for book_id {book_id}: {exc}")
            return {"status": "error", "message": str(exc)}
        return {"status": "scraping started", "results": scraped}
       
       
            
@router.get("/test_upBook")
async def test_upBook(book_id: int):
    """Debug endpoint: fetch raw book info from the lkong API for one book."""
    async with aiohttp.ClientSession() as session:
        try:
            # BUG FIX: fetch_bookInfo_from_api takes only (bid); passing the
            # session as a second positional argument raised TypeError on
            # every request, which the broad except then reported as an error.
            results = await fetch_bookInfo_from_api(book_id)
            return {"status": "scraping started", "results": results}
        except Exception as e:
            logger.error(f"Error in worker for book_id {book_id}: {e}")
            return {"status": "error", "message": str(e)}
            
            
# --- New book info: database-ingestion API ------------------------------



async def scrape_lk_books_task(book_id, session):
    """Create a Books record for *book_id* if it does not already exist.

    Fetches the book's details from the lkong API and inserts them.
    *session* is accepted for interface parity with the other workers but is
    unused — fetch_bookInfo_from_api manages its own session.

    Returns:
        A dict with a human-readable "message" describing the outcome.
    """
    bookInfo = Books.objects(ysbookId=book_id).first()
    if not bookInfo:
        book_info_result = await fetch_bookInfo_from_api(book_id)
        if book_info_result:
            # Unpack the three GraphQL sections returned by the API.
            book_config_data = book_info_result["data"]["bookConfig"]
            book_info_data = book_info_result["data"]["bookInfo"]
            book_count_data = book_info_result["data"]["bookCount"]

            status_value = 1 if book_info_data["finished"] else 0

            # bookCount may be absent/None for unrated books; default to 0.
            score_value = book_count_data["score"] if isinstance(book_count_data, dict) and "score" in book_count_data else 0
            scorerCount_value = book_count_data["rateCount"] if isinstance(book_count_data, dict) and "rateCount" in book_count_data else 0

            new_book = Books(
                ysbookId=book_id,
                status=status_value,
                tags=book_info_data["tags"],
                score=score_value,
                scorerCount=scorerCount_value,
                title=book_info_data["title"],
                author=book_info_data["author"],
                intro=book_info_data["intro"],
                link=book_config_data["link"],
                cover=book_info_data["cover"],
                countWord=book_info_data["wordCount"],
                # BUG FIX: was `datetime.now` (the function object), which
                # stored a callable instead of the current timestamp.
                updateAt=datetime.now(),
                caseId=0  # placeholder until a real case id is assigned
            )
            new_book.save()
            logger.info(f"{book_id} 入库成功")
            return {"message": "New book info created successfully"}
        else:
            logger.error(f"Failed to fetch book info for book_id {book_id}")
            return {"message": "Failed to fetch book info"}

    logger.info(f"{book_id} 已存在于数据库中，无需重复入库")
    return {"message": "Book already exists in database"}



# FastAPI路由
@router.post("/update-book/{bid}")
async def update_book_info(bid: int):
    async with aiohttp.ClientSession() as session:
        result = await scrape_lk_books_task(bid,session)
        return result

            
async def worker_lk_book(queue, session):
    """Consume book ids from *queue* until a None sentinel arrives.

    Each id is handed to scrape_lk_books_task; failures are logged and every
    consumed id is acknowledged with task_done(). The sentinel itself is not
    acknowledged.
    """
    while (next_id := await queue.get()) is not None:
        try:
            await scrape_lk_books_task(next_id, session)
        except Exception as err:
            logger.error(f"Error in worker for book_id {next_id}: {err}")
        finally:
            queue.task_done()
      
@router.get("/start-scraping_lk_book")
async def scraping_lk_book(start_id: int = 327319, end_id: int = 328015):
    """Scrape and ingest book info for ids in [start_id, end_id) using two workers."""
    worker_count = 2
    async with aiohttp.ClientSession() as session:
        workers = [asyncio.create_task(worker_lk_book(queue1, session)) for _ in range(worker_count)]
        await producer(start_id, end_id, queue1)
        # BUG FIX: the workers loop until they receive a None sentinel, but
        # none was ever enqueued, so gather() blocked forever once the queue
        # drained and this endpoint never responded.
        for _ in range(worker_count):
            await queue1.put(None)
        await asyncio.gather(*workers)
    return {"status": "lk books scraping started", "range": f"{start_id}-{end_id}"}

            
# @router.get("/start-scraping_lk_book")
# async def scraping_lk_book():
#     async with aiohttp.ClientSession() as session:
#         tasks = [asyncio.create_task(worker_lk_book(queue1, session)) for _ in range(2)]
#         # max_ysbookId = Comments.objects.order_by('-ysbookId').first().ysbookId
#         # max_ysbookId = max_ysbookId - 2 if max_ysbookId else 0
#         # 1-136730 无翻页
#         await asyncio.gather(
#             producer(327319, 328015, queue1),
#             *tasks
#         )
#     return {"status": "lk books scraping started"}           