# from fastapi import APIRouter, Depends, Request, HTTPException, status, Query, BackgroundTasks
# from novelapp.config.database import connect
# from novelapp.models.models import Books, Comments
# from pydantic import BaseModel
# from novelapp.common.common import returnData,setLog
# import json, httpx, time, requests, re
# from datetime import datetime
# from bs4 import BeautifulSoup
# import aiohttp
# import asyncio
# import logging
# import random
# from aiohttp import web

# NOTE(review): this entire module is commented out (dead code). The dormant
# code below is preserved byte-for-byte; only comments were translated/added.
# router = APIRouter()


# logger = setLog('scraping_log924_3.log')

# NOTE(review): `tasks` here is a module-level dict, but startup_event() below
# shadows it with a local list — one of the two names should change on revival.
# tasks = {}
# # Threshold for batch inserts into the database
# BATCH_SIZE = 60

# NOTE(review): creating asyncio.Queue() at import time binds it to whatever
# event loop exists then; on modern asyncio prefer creating queues inside the
# running loop (e.g. in the startup handler).
# queue1 = asyncio.Queue()
# data_queue = asyncio.Queue()

# NOTE(review): dead code — function is commented out. Purpose (from the dormant
# body): scrape review pages for one book from yousuu.sbwsz.com, parse each
# list item into a Comments document, and bulk-insert per page.
# NOTE(review): `session=''` defaults to a string; `session.get(url)` would raise
# AttributeError if no real aiohttp ClientSession is passed — prefer
# `session=None` plus an explicit guard.
# NOTE(review): `rating` is a str when the regex matches but the int 0 otherwise —
# inconsistent types reach the Comments model; confirm the field coerces both.
# NOTE(review): the duplicate check (`bc = Comments.objects(...)`) is disabled,
# so re-running a page inserts duplicate comments. `reviews = []` is unused.
# async def scrape_reviews_task(book_id, start_page=1, end_page=41,session=''):
#     bookInfo = Books.objects(ysbookId=book_id).first()

#     if not bookInfo:
#         logger.warning(f"Book ID {book_id} not found.")
#         return
#     ysbookId = bookInfo.ysbookId
    
#     total_comments = 0  # initialize the comment counter
    
#     for page in range(start_page, end_page + 1):
#         url = f'https://yousuu.sbwsz.com/book/{book_id}/?page={page}'
#         try:
#             async with session.get(url) as response:
#                 response.raise_for_status()
#                 html = await response.text()
#         except aiohttp.ClientError as e:
#             logger.error(f"Error fetching URL {url}: {e}")
#             continue
        
#         await asyncio.sleep(random.uniform(0.2, 1))
        
#         soup = BeautifulSoup(html, 'html.parser')
#         reviews = []

#         comments_to_save = []
#         for li in soup.select('ul.list-group li.list-group-item'):
#             try:
#                 username = li.find('strong').text
#                 p_elements = li.select('p')

#                 timestamp_str = p_elements[0].text.split(' - ')[1]
#                 text = li.p.text

#                 match = re.search(r'评分: (\d+) 星', text)
#                 rating = match.group(1) if match else 0

#                 content = li.find_all('p')[1].decode_contents()  # grab the raw inner HTML
#                 upvotes_str = p_elements[2].select_one('strong').text
#                 upvotes = int(upvotes_str)

#                 comment = Comments(
#                     book_id=bookInfo.id,
#                     ysbookId=ysbookId,
#                     username=username,
#                     createdAt=timestamp_str,
#                     rating=rating,
#                     content=content,
#                     upvotes=upvotes
#                 )
#                 # await data_queue.put(comment)
#                 # bc = Comments.objects(book_id=comment.book_id, createdAt=comment.createdAt).first()
#                 # if not bc:
#                 comments_to_save.append(comment)
#                 total_comments += 1  # bump the comment counter
#             except Exception as e:
#                 logger.error(f"Error processing review: {e}")
#                 continue
#         # bulk-save this page's comments
#         if comments_to_save:
#             Comments.objects.insert(comments_to_save)    
#     logger.info(f"{ysbookId} 入库成功.{total_comments} 条评论.")

# NOTE(review): dead code — function is commented out. Purpose (from the dormant
# body): consume book ids from the queue, detect the total page count from
# page 1, then run scrape_reviews_task under a per-book timeout.
# NOTE(review): BUG if revived — the `continue` in the except path skips
# `queue.task_done()`, and the `None`-sentinel `break` never calls it either;
# any `queue.join()` would then hang forever. Call task_done() on every
# consumed item (including sentinels and fetch failures).
# NOTE(review): `reviews = []` is unused; the "300 seconds" comment below is
# stale — the actual timeout is `end_page * 40` seconds.
# async def worker(queue,session):
#     while True:
#         book_id = await queue.get()
#         if book_id is None:
#             break
        
#         timeout_b = 10
#         end_page = 41
#         url = f'https://yousuu.sbwsz.com/book/{book_id}/?page=1'
#         try:
#             async with session.get(url) as response:
#                 response.raise_for_status()
#                 html = await response.text()
#         except aiohttp.ClientError as e:
#             logger.error(f"没有页码 {book_id} ")
#             continue

#         soup = BeautifulSoup(html, 'html.parser')
#         reviews = []

#         match = re.search(r'Page \d+ of (\d+)', html)
#         if match:
#             end_page = int(match.group(1))
#         timeout_b = end_page*40
           
#         logger.info(f"Book ID {book_id}-{timeout_b} 总页数 ：{end_page}.")
        
        
        
#         try:
#             # per-book timeout (timeout_b seconds, i.e. end_page * 40 — not 300)
#             await asyncio.wait_for(scrape_reviews_task(book_id,1,end_page,session), timeout=timeout_b)
#         except asyncio.TimeoutError:
#             logger.error(f"Task for book_id {book_id} timed out")
#         finally:
#             queue.task_done()
            
            
# NOTE(review): dead code — commented out. Enqueues one book id per value in
# [start, end). It never enqueues the `None` sentinels the workers break on,
# so the workers loop forever and the gather in startup_event never returns.
# async def producer(start, end, queue):
#     for book_id in range(start, end):
#         await queue.put(book_id)

# NOTE(review): dead code — commented out. Spins up 4 worker tasks sharing one
# aiohttp ClientSession, then feeds book ids from the highest stored ysbookId
# (minus 2) up to 326140.
# NOTE(review): local `tasks` list shadows the module-level `tasks` dict.
# NOTE(review): `.first().ysbookId` raises AttributeError when the Comments
# collection is empty — guard the None case before revival.
# NOTE(review): since producer pushes no None sentinels, the workers never exit
# and this gather never completes (see producer note).
# # @router.on_event("startup")
# async def startup_event():
#     async with aiohttp.ClientSession() as session:
#         tasks = []
#         for _ in range(4):  # number of concurrent worker tasks (not processes)
#             tasks.append(asyncio.create_task(worker(queue1, session)))
        
#         # tasks.append(asyncio.create_task(data_worker(data_queue)))

#         max_ysbookId = Comments.objects.order_by('-ysbookId').first().ysbookId
#         max_ysbookId = max_ysbookId - 2
#         await asyncio.gather(
#             producer(max_ysbookId, 326140, queue1),
#             *tasks
#         )

# NOTE(review): dead code — commented out. As written this endpoint awaits
# startup_event(), which (given the missing sentinels) never returns, so the
# HTTP request would block indefinitely. On revival, launch the scrape via
# FastAPI BackgroundTasks or asyncio.create_task and return immediately.
# # 121383
# @router.get("/start-scraping")
# async def start_scraping():
#     # kick off the scraping pipeline
#     await startup_event()
#     return {"status": "scraping started"}


# async def task_scheduler(queue, session):
#     while True:
#         queue_size = queue.qsize()
#         num_workers = min(3, max(1, queue_size // 10))
        
#         current_workers = len(asyncio.all_tasks())
#         if current_workers > num_workers:
#             for _ in range(current_workers - num_workers):
#                 await queue.put(None)
#         elif current_workers < num_workers:
#             for _ in range(num_workers - current_workers):
#                 asyncio.create_task(worker(queue, session))

#         await asyncio.sleep(5)  # 每隔一段时间检查一次任务队列
        
#         logger.error(f"task_scheduler {num_workers} ")


    
# # 查看队列状态的接口
# @router.get("/get_queue_status")
# async def get_queue_status():
#     status = {
#         'queue_size': queue1.qsize(),
#         'unfinished_tasks': queue1._unfinished_tasks,
#     }
#     return web.json_response(status)    
    
# NOTE(review): dead code — commented out (and its only enqueue site in
# scrape_reviews_task is also disabled). Dedup-then-batch-insert consumer.
# NOTE(review): BUG if revived — `data_queue.task_done()` is called exactly
# once, after the loop, instead of once per `get()`; `data_queue.join()` would
# hang. Also the per-item existence query is an N+1 round trip — consider a
# unique index on (book_id, createdAt) with insert-and-ignore instead.
# async def data_worker(data_queue):
#     batch = []
#     while True:
#         comment_data = await data_queue.get()
#         if comment_data is None:
#             break

#         # dedup check: skip comments already stored for this book + timestamp
#         bc = Comments.objects(book_id=comment_data.book_id, createdAt=comment_data.createdAt).first()
#         if not bc:
#             batch.append(comment_data)
#             if len(batch) >= BATCH_SIZE:
#                 Comments.objects.insert(batch)
#                 logger.info(f"{len(batch)} comments inserted.")
#                 batch = []
#         # else:
#         #     logger.info(f"Duplicate comment for book_id {comment_data.book_id} and createdAt {comment_data.createdAt}. Skipping.")

#     if batch:
#         Comments.objects.insert(batch)
#         logger.info(f"Remaining batch of {len(batch)} comments inserted.")

#     data_queue.task_done()


