import requests
from bs4 import BeautifulSoup
import re
import time
from seleniumwire import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from .models import WechatArticle

def crawl_wechat_article(article_url,
                         driver_path='C:/chromedriver-win64/chromedriver.exe',
                         page_load_wait=3):
    """
    Crawl a WeChat public-account article and store it in the database.

    Args:
        article_url: URL of the article to fetch.
        driver_path: Path to the chromedriver executable. Defaults to the
            previously hard-coded Windows location for backward compatibility.
        page_load_wait: Seconds to sleep after navigation so that
            JavaScript-rendered content has a chance to load (default 3).

    Returns:
        dict: ``{'success': True, 'title', 'content_length', 'id'}`` on
        success, or ``{'success': False, 'error': <message>}`` on failure.
    """
    print(f'开始爬取文章: {article_url}')

    # Configure Chrome for headless operation with automation fingerprints
    # masked, so the target site is less likely to serve a bot page.
    chrome_options = Options()
    chrome_options.add_argument('--headless')  # headless mode
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('--disable-blink-features=AutomationControlled')
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option('useAutomationExtension', False)

    service = Service(driver_path)

    # Keep selenium-wire from intercepting traffic to this host.
    seleniumwire_options = {
        'exclude_hosts': ['mtg.mimit-pku.org']
    }

    driver = webdriver.Chrome(
        seleniumwire_options=seleniumwire_options,
        service=service,
        options=chrome_options
    )

    try:
        # Navigate, then give dynamic content time to render.
        driver.get(article_url)
        time.sleep(page_load_wait)

        # Parse the fully rendered page.
        page_source = driver.page_source
        soup = BeautifulSoup(page_source, 'html.parser')

        # Extract the article title and body text.
        title = extract_title(soup)
        content = extract_content(soup)

        # Persist only when both title and body were found.
        if title and content:
            article = WechatArticle.objects.create(
                title=title,
                content=content,
                url=article_url,
                source='wechat'
            )
            print(f'文章保存成功: {title}')
            return {
                'success': True,
                'title': title,
                'content_length': len(content),
                'id': article.id
            }
        else:
            print('未能提取到文章内容')
            return {
                'success': False,
                'error': '未能提取到文章内容'
            }

    except Exception as e:
        # Top-level boundary: report the failure to the caller as data
        # instead of propagating the exception.
        print(f'爬取失败: {str(e)}')
        return {
            'success': False,
            'error': str(e)
        }
    finally:
        # Always release the browser, even when crawling failed.
        driver.quit()

def extract_title(soup):
    """Return the article title found in *soup*, or None.

    Probes a sequence of likely CSS selectors and accepts the first match
    whose stripped text is longer than five characters.
    """
    candidate_selectors = (
        'h1',
        '.rich_media_title',
        '.article-title',
        '.post-title',
        'title',
        '[class*="title"]',
        '[id*="title"]',
    )

    for css in candidate_selectors:
        node = soup.select_one(css)
        if not node:
            continue
        text = node.get_text().strip()
        # Reject trivially short matches (empty tags, stray labels).
        if text and len(text) > 5:
            return text

    return None

def extract_content(soup):
    """Return the article body text found in *soup*, or None.

    First tries a sequence of known content-container selectors; if none
    yields more than 50 characters of cleaned text, falls back to joining
    every non-empty ``<p>`` element in the page.
    """
    candidate_selectors = (
        '.rich_media_content',
        '.article-content',
        '.post-content',
        '.content',
        '.article-body',
        '.post-body',
        '[class*="content"]',
        '[id*="content"]',
    )

    for css in candidate_selectors:
        container = soup.select_one(css)
        if not container:
            continue
        text = clean_html_content(container)
        # Accept only containers with a meaningful amount of text.
        if text and len(text) > 50:
            return text

    # Fallback: stitch together all non-blank paragraphs on the page.
    pieces = [p.get_text().strip() for p in soup.find_all('p')]
    text = '\n\n'.join(piece for piece in pieces if piece)
    if text and len(text) > 50:
        return text

    return None

def clean_html_content(element):
    """Strip scripts/styles from *element* and return its normalized text.

    Lines are stripped, runs of double spaces split phrases apart, and
    empty fragments are dropped; fragments are rejoined with newlines.
    """
    # Remove non-content tags so their text never reaches the output.
    for junk in element(["script", "style"]):
        junk.decompose()

    raw = element.get_text()

    # Normalize whitespace: one cleaned phrase per output line.
    fragments = []
    for line in raw.splitlines():
        for phrase in line.strip().split("  "):
            phrase = phrase.strip()
            if phrase:
                fragments.append(phrase)

    return '\n'.join(fragments)

def batch_crawl_articles(article_urls, delay=2):
    """
    Crawl several WeChat articles sequentially.

    Args:
        article_urls: Iterable of article URLs to crawl.
        delay: Seconds to pause between consecutive requests to avoid
            hitting the server too frequently (default 2, matching the
            previous hard-coded pause).

    Returns:
        list: One result dict (from crawl_wechat_article) per URL, in order.
    """
    results = []
    for index, url in enumerate(article_urls):
        # Throttle between requests only — no wasted pause after the
        # final URL (the original slept once more after the last crawl).
        if index:
            time.sleep(delay)
        results.append(crawl_wechat_article(url))
    return results