from fastapi import APIRouter,Depends,Request,HTTPException, status, Query, BackgroundTasks
from novelapp.config.database import connect
from novelapp.models.models import Books,Comments
# from novelapp.api.quene_task import Books,Comments
from pydantic import BaseModel
from novelapp.common.common import returnData
import json,httpx,time,requests,re
from datetime import datetime
from bs4 import BeautifulSoup

import aiohttp
import asyncio
import logging


# Configure module logging: console via basicConfig plus a dedicated file.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Persist scrape progress/errors to a run-specific log file.
log_file = 'scraping_log926_4.log'
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

# NOTE(review): the original created `router = APIRouter()` twice (before and
# after the logging setup); a single instance is sufficient.
router = APIRouter()

def book_exists(book_id):
    """Return True when a Books document with this ysbookId is already stored."""
    return Books.objects(ysbookId=book_id).first() is not None
    
async def scrape_and_save(book_id):
    """Scrape one book's detail page from yousuu.sbwsz.com and persist it.

    Deletes any existing Books document with the same ysbookId first, then
    saves the freshly scraped record and kicks off comment scraping.
    Returns the comment-scrape result dict, or None when the fetch fails.
    """
    if book_exists(book_id):
        # Replace stale data: drop the old document before re-scraping.
        Books.objects(ysbookId=book_id).delete()
        print(f"Deleted existing book with ID {book_id}.")

    url = f"https://yousuu.sbwsz.com/book/{book_id}"
    # requests.get is blocking; run it in a worker thread so the async
    # event loop is not stalled while the page downloads.
    response = await asyncio.to_thread(requests.get, url)
    if response.status_code != 200:
        print(f"Failed to fetch book page for book ID {book_id}. Status code: {response.status_code}")
        return
    soup = BeautifulSoup(response.text, 'html.parser')

    # The page renders the book title in the second <h1 class="my-4">.
    my4_elements = soup.find_all('h1', class_='my-4')
    title = my4_elements[1].text if len(my4_elements) > 1 else None

    # Guarded: the original chained .find('p').find('a') and raised
    # AttributeError when either element was missing from the page.
    first_p = soup.find('p')
    author_link = first_p.find('a') if first_p else None
    author = author_link.text.strip() if author_link else None

    def extract_value(key):
        # Pull a numeric field (e.g. "评分: 8.3") from the paragraph that
        # contains the given label; None when absent or unparsable.
        element = soup.select_one(f'p:-soup-contains("{key}:")')
        if element:
            match = re.search(rf'{key}: (\d+(?:\.\d+)?)', element.text)
            return float(match.group(1)) if match else None
        return None

    score = extract_value('评分')
    scorer_count = extract_value('评分人数')
    count_word = extract_value('总字数')

    img_tag = soup.find('img', class_='img-fluid')
    cover = img_tag['src'] if img_tag else None
    tags = [a.text.strip() for a in soup.find_all('a', href=re.compile(r'/?tags='))]

    book = Books(
        title=title,
        ysbookId=book_id,
        author=author,
        score=score,
        scorerCount=scorer_count,
        countWord=count_word,
        cover=cover,
        tags=tags,
    )
    book.save()
    print(f"Book ID {book_id} processed successfully.")
    return await process_comments(book_id)

async def process_comments(book_id):
    """Run the queued review-scraping task for one book in a fresh HTTP session.

    Returns a status dict: results on success, the error message on failure.
    """
    async with aiohttp.ClientSession() as session:
        try:
            # Imported lazily to avoid a circular import at module load time.
            from novelapp.api.quene_task import scrape_reviews_task
            task_results = await scrape_reviews_task(book_id, session)
        except Exception as exc:
            logger.error(f"Error in worker for book_id {book_id}: {exc}")
            return {"status": "error", "message": str(exc)}
        return {"status": "scraping started", "results": task_results}

@router.get("/process_log")
async def process_log():
    """Re-scrape every book whose ID was logged as "not found".

    Scans the log file for "Book ID <n> not found." lines, de-duplicates the
    IDs, and re-runs scrape_and_save for each of them concurrently.
    """
    not_found_ids = set()
    with open('longkong_log926_1.log', 'r') as log_file:
        # Iterate lazily instead of materializing the whole file.
        for line in log_file:
            # Dot escaped: the original pattern's bare '.' matched any char.
            match = re.search(r'Book ID (\d+) not found\.', line)
            if match:
                not_found_ids.add(int(match.group(1)))

    tasks = [scrape_and_save(book_id) for book_id in not_found_ids]
    results = await asyncio.gather(*tasks)
    return {"message": "Log processed and books reprocessed.", "results": results}



async def scrape_reviews_task(book_id: int, start_page: int, end_page: int):
    """Scrape review pages for a book and persist new comments.

    Fetches pages start_page..end_page (inclusive) of the book's review
    listing and saves each review as a Comments document, skipping
    (book, username) pairs that are already stored.
    """
    # Hoisted: the original queried Books once per review inside the
    # innermost loop; one lookup up front is enough.
    book = Books.objects(ysbookId=book_id).first()
    if not book:
        logger.warning(f"Book ID {book_id} not found.")
        return

    async with aiohttp.ClientSession() as session:
        for page in range(start_page, end_page + 1):
            url = f'https://yousuu.sbwsz.com/book/{book_id}/?page={page}&sort=createdat&order=asc'
            try:
                async with session.get(url) as response:
                    response.raise_for_status()  # surface non-2xx responses
                    html = await response.text()
            except aiohttp.ClientError as e:
                logger.error(f"Error fetching URL {url}: {e}")
                continue

            soup = BeautifulSoup(html, 'html.parser')

            # Fixed NameError: the original searched the undefined name
            # `html_content` here instead of `html`.
            match = re.search(r'Page \d+ of (\d+)', html)
            if match:
                total_pages = match.group(1)
                print(total_pages)

            for li in soup.select('ul.list-group li.list-group-item'):
                try:
                    username = li.find('strong').text
                    p_elements = li.select('p')

                    # First <p> looks like "<user> - <timestamp>".
                    timestamp_str = p_elements[0].text.split(' - ')[1]
                    text = li.p.text

                    # Normalized to int: the original stored a str on match
                    # but an int 0 otherwise.
                    rating_match = re.search(r'评分: (\d+) 星', text)
                    rating = int(rating_match.group(1)) if rating_match else 0

                    content = li.find_all('p')[1].text
                    upvotes = int(p_elements[2].select_one('strong').text)

                    existing = Comments.objects(book_id=book.id, username=username).first()
                    if not existing:
                        Comments(
                            book_id=book.id,
                            ysbookId=book.ysbookId,
                            username=username,
                            createdAt=timestamp_str,  # TODO: parse to datetime if the model expects one
                            rating=rating,
                            content=content,
                            upvotes=upvotes,
                        ).save()
                except Exception as e:
                    # One malformed review must not abort the whole page.
                    logger.error(f"Error processing review: {e}")
                    continue

    logger.info("Scraping complete. Reviews have been processed.")

@router.get("/scrape_reviews/")
async def scrape_reviews(book_id: int, start_page: int, end_page: int, background_tasks: BackgroundTasks):
    """Queue review scraping for a page range and return immediately."""
    background_tasks.add_task(scrape_reviews_task, book_id, start_page, end_page)
    return {"message": "Scraping has started in the background."}





@router.get("/scrape_reviews_test/")
async def scrape_reviews_test():
    """Debug endpoint: fetch one review page and report its total page count.

    Returns the count parsed from the "Page X of Y" pagination label, or
    None when the span or the label is missing.
    """
    url = 'https://yousuu.sbwsz.com/book/1/?page=41&sort=createdat&order=asc'
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            html = await response.text()

    soup = BeautifulSoup(html, 'html.parser')

    # Guarded: the original raised AttributeError when the pagination span
    # was missing, and NameError when the regex did not match.
    total_pages = None
    page_link = soup.find('span', class_='page-link')
    if page_link:
        match = re.search(r'Page \d+ of (\d+)', page_link.text)
        if match:
            total_pages = match.group(1)
            print(total_pages)

    # NOTE(review): the original contained ~45 lines of review-persisting
    # code after this return; it was unreachable dead code duplicating
    # scrape_reviews_task, so it has been removed.
    return total_pages

