import feedparser
import requests
from bs4 import BeautifulSoup
from app import db
from app.models import Article
from datetime import datetime
from flask import current_app
import time
from urllib.parse import urljoin

def scrape_by_category(category_name, url):
    """Dispatch scraping of one category to the RSS or the HTML parser.

    The feed type is inferred from the URL: anything containing the
    substring 'rss' is treated as an RSS feed, everything else as a
    plain HTML listing page.
    """
    current_app.logger.info(f"开始抓取分类 [{category_name}] 从 {url}...")

    # Pick the parser once, then invoke it — same branch as the
    # original if/else, expressed as a conditional dispatch.
    handler = parse_rss if 'rss' in url else parse_html
    handler(category_name, url)

def parse_rss(category_name, url):
    """Fetch an RSS/Atom feed and queue every entry as an Article.

    Entries without a parseable publication date fall back to the
    current UTC time. All inserts are committed in one transaction;
    any error rolls the whole batch back and is logged.

    :param category_name: category label stored on each Article
    :param url: feed URL passed to feedparser
    """
    try:
        feed = feedparser.parse(url)
        found_articles = 0
        for entry in feed.entries:
            # Use .get() instead of attribute access: malformed feeds may
            # omit title/link, and add_article_to_db already skips falsy
            # values, so a missing field no longer aborts the whole feed.
            title = entry.get('title')
            article_url = entry.get('link')
            # feedparser can expose 'published_parsed' with a None value,
            # so test truthiness — a bare key-membership check would let
            # time.mktime(None) raise and roll back the entire batch.
            published_struct = entry.get('published_parsed')
            if published_struct:
                published_time = datetime.fromtimestamp(time.mktime(published_struct))
            else:
                published_time = datetime.utcnow()
            add_article_to_db(category_name, title, article_url, published_time)
            found_articles += 1

        db.session.commit()
        current_app.logger.info(f"RSS分类 [{category_name}] 抓取完成，发现 {found_articles} 篇文章。")
    except Exception as e:
        current_app.logger.error(f"处理RSS分类 [{category_name}] 时发生错误: {e}")
        db.session.rollback()

def parse_html(category_name, url):
    """Scrape an HTML listing page and queue each entry as an Article.

    Looks for <div class="item"> blocks containing an <a class="header">
    link. Listing pages carry no publication date, so the scrape time
    (UTC) is used instead. All inserts are committed in one transaction;
    any error rolls the whole batch back and is logged.

    :param category_name: category label stored on each Article
    :param url: listing-page URL to fetch; also the base for resolving
        relative article links
    """
    try:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')
        found_articles = 0
        # Every blog entry on the listing page is a div.item
        for item in soup.find_all('div', class_='item'):
            title_tag = item.find('a', class_='header')
            if not title_tag:
                continue
            # .get() avoids a KeyError (which would abort and roll back
            # the whole batch) when an anchor has no href attribute.
            href = title_tag.get('href')
            if not href:
                continue
            title = title_tag.get_text(strip=True)
            # Resolve relative links against the page actually fetched
            # rather than a hard-coded host; absolute URLs pass through
            # urljoin unchanged, so no startswith('http') check is needed.
            article_url = urljoin(url, href)

            add_article_to_db(category_name, title, article_url, datetime.utcnow())
            found_articles += 1

        db.session.commit()
        current_app.logger.info(f"HTML分类 [{category_name}] 抓取完成，发现 {found_articles} 篇文章。")
    except Exception as e:
        current_app.logger.error(f"处理HTML分类 [{category_name}] 时发生错误: {e}")
        db.session.rollback()

def add_article_to_db(category, title, url, pub_date):
    """Queue a new Article for insertion unless it already exists.

    Falsy titles/URLs and URLs already present in the database are
    silently skipped. Only adds to the session — the caller is
    responsible for committing the transaction.
    """
    # Guard clauses replace the original nested ifs.
    if not title or not url:
        return
    if Article.query.filter_by(url=url).first() is not None:
        return  # duplicate URL — already scraped

    db.session.add(Article(
        title=title,
        url=url,
        source='开源中国',
        category=category,
        published_at=pub_date,
    ))
    current_app.logger.info(f"[{category}] 新增: {title}")