import aiohttp
import asyncio
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import json
import re

# --- Configuration ---

# Target website to crawl
QBITAI_URL = "https://www.qbitai.com/"
# Crawler request headers (browser-like UA/referer to avoid basic bot blocking)
CRAWLER_HEADERS = {
    'referer': 'https://www.qbitai.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36 Edg/141.0.0.0'
}

# Backend API endpoint that receives the scraped news items
BACKEND_API_URL = "http://39.104.66.118:8444/api/news"
# API request headers, including authentication.
# NOTE(review): the satoken session cookie is hard-coded and will expire;
# consider loading it from an environment variable or config file.
API_HEADERS = {
    'Content-Type': 'application/json',
    'Cookie': 'satoken=4d642756-9717-451d-9123-35f5289e2d9b'
}

# --- Retry and concurrency settings ---
MAX_RETRIES = 3  # maximum number of send attempts per news item
RETRY_DELAY_SECONDS = 2  # delay between retries (seconds)
MAX_CONCURRENT_REQUESTS = 3  # cap on simultaneous POSTs, to avoid overloading the server


def parse_time_to_datetime(time_str):
    """Convert a qbitai.com time label into a datetime.

    Supported formats (returns None for anything else or for invalid dates):
      - "N分钟前"            -> now - N minutes
      - "N小时前"            -> now - N hours
      - "昨天 HH:MM"         -> yesterday at HH:MM
      - "前天 HH:MM"         -> the day before yesterday at HH:MM
      - "YYYY-MM-DD[ HH:MM]" -> that date (midnight when no time is present)
      - "MM-DD HH:MM"        -> that date/time, assumed in the current year
    """
    now = datetime.now()
    time_str = time_str.strip()
    if match := re.search(r'(\d+)\s*分钟前', time_str):
        return now - timedelta(minutes=int(match.group(1)))
    if match := re.search(r'(\d+)\s*小时前', time_str):
        return now - timedelta(hours=int(match.group(1)))
    if match := re.search(r'昨天\s*(\d{1,2}):(\d{2})', time_str):
        hour, minute = int(match.group(1)), int(match.group(2))
        yesterday = now - timedelta(days=1)
        return yesterday.replace(hour=hour, minute=minute, second=0, microsecond=0)
    if match := re.search(r'前天\s*(\d{1,2}):(\d{2})', time_str):
        hour, minute = int(match.group(1)), int(match.group(2))
        day_before_yesterday = now - timedelta(days=2)
        return day_before_yesterday.replace(hour=hour, minute=minute, second=0, microsecond=0)
    # Full date with an OPTIONAL "HH:MM" part.  The previous version discarded
    # the time-of-day, which made full-timestamp articles look like midnight
    # and could wrongly exclude them from the caller's 24-hour window.
    if match := re.search(r'(\d{4})-(\d{1,2})-(\d{1,2})(?:\s+(\d{1,2}):(\d{2}))?', time_str):
        try:
            year, month, day = int(match.group(1)), int(match.group(2)), int(match.group(3))
            hour = int(match.group(4)) if match.group(4) else 0
            minute = int(match.group(5)) if match.group(5) else 0
            return datetime(year, month, day, hour, minute)
        except ValueError:
            return None
    # "MM-DD HH:MM" — assumes the current year (may be off across New Year).
    if match := re.search(r'(\d{1,2})-(\d{1,2})\s*(\d{1,2}):(\d{2})', time_str):
        try:
            month, day, hour, minute = map(int, match.groups())
            return datetime(now.year, month, day, hour, minute)
        except ValueError:
            return None
    return None


async def fetch_news_info(url, headers):
    """Fetch the qbitai.com front page and collect news from the last 24 hours.

    Args:
        url: Page URL to crawl.
        headers: HTTP request headers for the crawl.

    Returns:
        A list of dicts with keys 'imgUrl', 'title', 'titleLink', 'infoText';
        an empty list on any network or parsing failure.
    """
    print(f"开始爬取 {url} 的新闻...")
    async with aiohttp.ClientSession() as session:
        try:
            async with session.get(url, headers=headers, timeout=15) as response:
                response.raise_for_status()
                html = await response.text()
                soup = BeautifulSoup(html, 'html.parser')
                picture_text_list = soup.find_all('div', class_='picture_text')
                if not picture_text_list: print("警告：未在页面中找到 'picture_text' 容器。"); return []

                recent_news = []
                time_threshold = datetime.now() - timedelta(hours=24)
                for picture_text in picture_text_list:
                    time_tag = picture_text.find('span', class_='time')
                    if not time_tag: continue
                    news_publish_time = parse_time_to_datetime(time_tag.get_text())
                    if not news_publish_time or news_publish_time < time_threshold: continue

                    img_tag = picture_text.find('img', class_='wp-post-image')
                    img_url = img_tag['src'] if img_tag else ''
                    # BUG FIX: the original chained .find('h4').find('a') and
                    # .find('div', ...).find('p'); a single malformed card
                    # raised AttributeError and the broad except below then
                    # discarded the ENTIRE page. Guard each lookup instead, so
                    # only the bad card is skipped.
                    h4_tag = picture_text.find('h4')
                    title_tag = h4_tag.find('a') if h4_tag else None
                    title = title_tag.get_text(strip=True) if title_tag else None
                    title_link = title_tag['href'] if (title_tag and 'href' in title_tag.attrs) else None
                    text_box = picture_text.find('div', class_='text_box')
                    info_p = text_box.find('p') if text_box else None
                    info_text = info_p.get_text(strip=True) if info_p else None

                    if title:
                        recent_news.append({'imgUrl': img_url, 'title': title, 'titleLink': title_link, 'infoText': info_text})
                return recent_news
        except Exception as e:
            # Safety net for network errors (timeouts, HTTP errors, DNS, ...).
            print(f"爬取新闻时发生错误: {e}")
            return []

async def send_news_with_semaphore(semaphore, session, news_item):
    """Concurrency-limited wrapper around send_news_to_backend.

    Acquires the shared semaphore before sending, so at most the semaphore's
    limit of POST requests are in flight at any one time.
    """
    async with semaphore:
        result = await send_news_to_backend(session, news_item)
    return result


async def send_news_to_backend(session, news_item):
    """POST a single news item to BACKEND_API_URL with retry logic.

    Server-side (5xx) and network errors are retried up to MAX_RETRIES times,
    waiting RETRY_DELAY_SECONDS between attempts. Client (4xx) errors are
    treated as permanent and abort immediately.

    Args:
        session: An open aiohttp.ClientSession used for the POST.
        news_item: Dict payload; its 'title' key is used for log messages.

    Returns:
        True when the backend accepted the item, False otherwise.
    """
    title_short = news_item['title'][:30]

    attempt = 0
    while attempt < MAX_RETRIES:
        try:
            async with session.post(
                BACKEND_API_URL,
                json=news_item,
                headers=API_HEADERS,
                timeout=10
            ) as response:
                response.raise_for_status()
                print(f"✅ 成功发送新闻: '{title_short}...'")
                return True

        except aiohttp.ClientError as e:
            # Not every ClientError carries an HTTP status (pure network
            # failures don't), so read the attribute defensively.
            status_code = getattr(e, 'status', 'N/A')
            error_message = str(e)

            if isinstance(status_code, int) and 400 <= status_code < 500:
                # 4xx client errors: retrying will not help, give up now.
                print(f"❌ 发送新闻失败 (客户端错误 HTTP {status_code}): '{title_short}...' | 原因: {error_message}")
                if status_code == 401:
                    print("   提示：这可能是认证失败，请检查 Cookie 中的 satoken 是否有效。")
                return False

            # 5xx or transport-level failures are worth another attempt.
            print(f"⚠️ 发送新闻遇到问题 (HTTP {status_code}), 将重试... ({attempt + 1}/{MAX_RETRIES}) | '{title_short}...'")

        except Exception as e:
            print(f"⚠️ 发送新闻遇到未知错误, 将重试... ({attempt + 1}/{MAX_RETRIES}) | '{title_short}...' | 错误: {e}")

        attempt += 1
        # Back off before the next attempt (but not after the final one).
        if attempt < MAX_RETRIES:
            await asyncio.sleep(RETRY_DELAY_SECONDS)

    # Every attempt failed.
    print(f"❌ 发送新闻最终失败: '{title_short}...'")
    return False


async def main():
    """Orchestrate the crawl-then-publish pipeline.

    Crawls the qbitai.com front page for news from the last 24 hours, pushes
    each item to the backend API with bounded concurrency, and prints a
    success/failure summary.
    """
    news_list = await fetch_news_info(QBITAI_URL, CRAWLER_HEADERS)

    if not news_list:
        print(f"未找到最近24小时内的相关新闻或爬取失败。")
        return

    total = len(news_list)
    print(f"\n共找到 {total} 条最近24小时内的新闻，准备发送到后端...\n")

    # One shared semaphore caps the number of simultaneous POSTs.
    limiter = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)

    async with aiohttp.ClientSession() as session:
        results = await asyncio.gather(
            *(send_news_with_semaphore(limiter, session, item) for item in news_list)
        )

    succeeded = sum(1 for ok in results if ok)
    failed = total - succeeded

    banner = "=" * 80
    print("\n" + banner)
    print(f"任务完成！共尝试发送 {total} 条新闻。")
    print(f"成功: {succeeded} 条")
    print(f"失败: {failed} 条")
    print(banner)

if __name__ == "__main__":
    # Script entry point: run the full crawl-and-send pipeline to completion.
    asyncio.run(main())
