import requests
import json
import logging
import re
from datetime import datetime
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup, Tag
from typing import Any, List, Dict, Optional
from src.models.models import db, Subscription, DownloadTask, CrawlLog, SystemConfig, get_beijing_time
from src.services.downloader import create_download_tasks

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def extract_download_links(content, base_url=""):
    """
    Extract download links of various types from raw text content.

    Only magnet links are currently extracted; support for other schemes
    (torrent/thunder/ed2k/baidu/aliyun) was disabled upstream and the dead
    commented-out patterns have been removed.

    Args:
        content: Raw text to scan (page text or RSS item description).
        base_url: Reserved for resolving relative URLs; not needed for
            magnet links, kept for interface compatibility.

    Returns:
        List of dicts of the form {'url': <link>, 'type': 'magnet'}.
    """
    download_links = []

    # BitTorrent info hashes in magnet URIs come in two encodings:
    # 40 hex characters or 32 base32 characters (BEP 9). The previous
    # pattern only matched the hex form and silently dropped base32 magnets.
    magnet_pattern = (
        r'magnet:\?xt=urn:btih:(?:[a-fA-F0-9]{40}|[A-Za-z2-7]{32})[^\s"<>]*'
    )
    magnet_links = re.findall(magnet_pattern, content)
    download_links.extend([{'url': link, 'type': 'magnet'} for link in magnet_links])

    return download_links

def parse_rss_feed(content):
    """
    Parse RSS feed content and collect download links from each item.

    Scans every <item>'s <description> text for embedded links, and also
    checks <enclosure> tags for direct .torrent URLs.

    Returns:
        List of {'url': ..., 'type': ...} dicts; empty list on parse failure.
    """
    try:
        soup = BeautifulSoup(content, 'xml')
        collected = []
        for item in soup.find_all('item'):
            # Description text may embed magnet/other links.
            desc = item.find('description')  # type: ignore
            if desc:
                collected.extend(extract_download_links(desc.get_text()))  # type: ignore

            # Enclosure tags often carry the torrent file URL directly.
            enc = item.find('enclosure')  # type: ignore
            if enc and enc.get('url'):  # type: ignore
                enc_url = enc.get('url')  # type: ignore
                if enc_url and enc_url.endswith('.torrent'):  # type: ignore
                    collected.append({'url': enc_url, 'type': 'torrent'})
        return collected
    except Exception as e:
        logger.error(f"RSS解析失败: {str(e)}")
        return []

def parse_html_page(content, base_url, site=None):
    """
    Parse an HTML page and collect download links, with per-site rules.

    Args:
        content: Raw HTML text.
        base_url: Page URL, used to resolve relative hrefs.
        site: Optional site profile ('dygod' default behaviour, 'meijutt'
            uses a dedicated DOM structure).

    Returns:
        List of {'url': ..., 'type': ...} dicts; empty list on parse failure.
    """
    try:
        soup = BeautifulSoup(content, 'html.parser')
        results = []
        if site == 'dygod' or not site:
            # Default path: scan visible page text first, then every anchor.
            results = extract_download_links(soup.get_text(), base_url)
            for anchor in soup.find_all('a', href=True):
                raw_href = anchor.get('href')
                if not raw_href:
                    continue
                href_text = str(raw_href)
                resolved = urljoin(base_url, href_text)
                if href_text.startswith('magnet:'):
                    results.append({'url': href_text, 'type': 'magnet'})
                elif href_text.startswith('thunder://'):
                    results.append({'url': href_text, 'type': 'thunder'})
                elif href_text.startswith('ed2k://'):
                    results.append({'url': href_text, 'type': 'ed2k'})
                elif resolved.endswith('.torrent'):
                    results.append({'url': resolved, 'type': 'torrent'})
        elif site == 'meijutt':
            # meijutt lists links inside the active tab's <strong> entries.
            container = soup.find('div', class_='tabs-list current-tab')
            if isinstance(container, Tag):
                for strong in container.find_all('strong', class_='down_part_name'):
                    if not isinstance(strong, Tag):
                        continue
                    anchor = strong.find('a', href=True)
                    link = anchor['href'] if isinstance(anchor, Tag) and 'href' in anchor.attrs else None
                    if link:
                        results.append({'url': link, 'type': 'meijutt'})
                    else:
                        # Fall back to the plain text when no anchor exists.
                        label = strong.get_text(strip=True)
                        if label:
                            results.append({'url': label, 'type': 'meijutt'})
        return results
    except Exception as e:
        logger.error(f"HTML解析失败: {str(e)}")
        return []

def downloadlist(subscription_url, site=None):
    """
    Fetch a subscription URL and extract download addresses.

    Dispatches to the RSS parser when the response declares an XML/RSS
    content type, otherwise to the HTML parser (with the optional site
    profile). Results are deduplicated in first-seen order.

    Returns:
        dict with keys 'success' (bool), 'urls' (list[str]), 'message' (str).
    """
    try:
        request_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(subscription_url, headers=request_headers, timeout=30)
        response.raise_for_status()
        # Prefer the sniffed encoding over the declared one (CJK pages
        # frequently mislabel their charset).
        response.encoding = response.apparent_encoding
        body = response.text
        declared_type = response.headers.get('content-type', '').lower()
        # Pick the parser based on the declared content type.
        if 'xml' in declared_type or 'rss' in declared_type:
            found_links = parse_rss_feed(body)
        else:
            found_links = parse_html_page(body, subscription_url, site)
        # Deduplicate while preserving first-seen order.
        seen = set()
        unique_urls = []
        for entry in found_links:
            candidate = entry['url']
            if candidate in seen:
                continue
            seen.add(candidate)
            unique_urls.append(candidate)
        logger.info(f"从 {subscription_url} 提取到 {len(unique_urls)} 个下载链接")
        return {
            'success': True,
            'urls': unique_urls,
            'message': f'成功爬取到{len(unique_urls)}个下载地址'
        }
    except requests.RequestException as e:
        logger.error(f"爬取失败: {str(e)}")
        return {
            'success': False,
            'urls': [],
            'message': f'爬取失败: {str(e)}'
        }
    except Exception as e:
        logger.error(f"爬取出错: {str(e)}")
        return {
            'success': False,
            'urls': [],
            'message': f'爬取出错: {str(e)}'
        }

def should_auto_download(subscription):
    """
    Decide whether a subscription's new links should be auto-downloaded.

    Both switches must be on:
    1. the global 'global_auto_download' flag in SystemConfig, and
    2. the subscription's own auto_download flag.

    Returns:
        True only when both conditions hold; False otherwise (including on
        any error while reading configuration).
    """
    try:
        # Global switch gates everything.
        if SystemConfig.get_config('global_auto_download', 'false') != 'true':
            logger.info(f"全局自动下载开关未开启，跳过订阅 {subscription.name} 的自动下载")
            return False

        # Per-subscription switch.
        if not subscription.auto_download:
            logger.info(f"订阅 {subscription.name} 的自动下载开关未开启")
            return False

        logger.info(f"订阅 {subscription.name} 满足自动下载条件")
        return True

    except Exception as e:
        logger.error(f"检查自动下载条件失败: {str(e)}")
        return False

def _record_failed_crawl(subscription, error_message):
    """
    Persist a failed CrawlLog entry for *subscription*.

    DB errors are logged rather than raised so that logging a failure can
    never mask the original crawl error (the previous code duplicated this
    block twice and swallowed errors with a bare `except: pass`).
    """
    try:
        log = CrawlLog()
        log.subscription_id = subscription.id
        log.crawl_count = get_crawl_count(subscription.id) + 1
        log.status = 'failed'
        log.error_message = error_message
        log.total_urls_count = len(subscription.get_latest_download_urls())
        db.session.add(log)
        db.session.commit()
    except Exception as log_error:
        logger.error(f"记录爬取失败日志出错: {str(log_error)}")

def crawl_subscription(subscription):
    """
    Crawl a single subscription and record the result.

    Fetches the subscription's URL (passing its optional site profile),
    diffs the scraped URLs against the previously stored ones, updates the
    subscription's latest URL list, writes a CrawlLog row, and — when the
    auto-download conditions hold — creates download tasks for the new URLs.

    Returns:
        dict with 'success'; on success also 'data' (total_urls, new_urls,
        incremental_urls) and 'message'.
    """
    try:
        logger.info(f"开始爬取订阅: {subscription.name}")
        # site is optional on the model; default to None when absent.
        result = downloadlist(subscription.url, getattr(subscription, 'site', None))

        if not result['success']:
            _record_failed_crawl(subscription, result['message'])
            return {
                'success': False,
                'message': result['message']
            }

        new_urls = result['urls']
        current_urls = subscription.get_latest_download_urls()

        # Only URLs never seen before count as incremental.
        incremental_urls = [url for url in new_urls if url not in current_urls]

        # Replace the stored snapshot with the freshly crawled list.
        subscription.set_latest_download_urls(new_urls)

        log = CrawlLog()
        log.subscription_id = subscription.id
        log.crawl_count = get_crawl_count(subscription.id) + 1
        log.total_urls_count = len(new_urls)
        log.status = 'success'

        if incremental_urls:
            log.set_new_urls(incremental_urls)

            # Auto-download requires both the global and per-subscription flags.
            if should_auto_download(subscription):
                logger.info(f"订阅 {subscription.name} 开启自动下载，为 {len(incremental_urls)} 个增量地址创建下载任务")
                create_download_tasks(subscription, incremental_urls)
            else:
                logger.info(f"订阅 {subscription.name} 未开启自动下载，跳过下载任务创建")

        db.session.add(log)
        db.session.commit()

        logger.info(f"订阅 {subscription.name} 爬取完成，新增 {len(incremental_urls)} 个地址")

        return {
            'success': True,
            'data': {
                'total_urls': len(new_urls),
                'new_urls': len(incremental_urls),
                'incremental_urls': incremental_urls
            },
            'message': f'爬取成功，新增{len(incremental_urls)}个下载地址'
        }

    except Exception as e:
        logger.error(f"爬取订阅失败: {str(e)}")
        _record_failed_crawl(subscription, str(e))
        return {
            'success': False,
            'message': f'爬取失败: {str(e)}'
        }

def get_crawl_count(subscription_id):
    """
    Return the highest crawl_count recorded for a subscription (0 if none).

    Query failures are logged and treated as "no previous crawls" so the
    caller can still proceed with crawl_count = 1.
    """
    try:
        last_log = CrawlLog.query.filter_by(subscription_id=subscription_id)\
                              .order_by(CrawlLog.crawl_count.desc()).first()
        return last_log.crawl_count if last_log else 0
    except Exception as e:
        # Was a bare `except:` that silently swallowed everything,
        # including KeyboardInterrupt/SystemExit.
        logger.error(f"查询爬取次数失败: {str(e)}")
        return 0

def crawl_all_active_subscriptions():
    """
    Crawl every active subscription and aggregate per-subscription results.

    Returns:
        dict with 'success'; on success also 'data' (processed_count,
        results) and 'message'. On failure only 'success' and 'message'.
    """
    try:
        subscriptions = Subscription.query.filter_by(is_active=True).all()

        # Crawl each subscription in turn, keeping its individual outcome.
        results = [
            {
                'subscription_id': sub.id,
                'subscription_name': sub.name,
                'result': crawl_subscription(sub)
            }
            for sub in subscriptions
        ]

        processed = len(subscriptions)
        logger.info(f"批量爬取完成，处理了{processed}个订阅")

        return {
            'success': True,
            'data': {
                'processed_count': processed,
                'results': results
            },
            'message': f'批量爬取完成，处理了{processed}个订阅'
        }

    except Exception as e:
        logger.error(f"批量爬取失败: {str(e)}")
        return {
            'success': False,
            'message': f'批量爬取失败: {str(e)}'
        }

