from playwright.sync_api import sync_playwright
import time
import os
from datetime import datetime
import json

def _collect_news_links(page, category):
    """Extract article links from a loaded list page.

    Scans the fixed XPath for <li> entries, pulls the anchor's title and
    href, and normalizes protocol-relative ("//...") and site-relative
    hrefs to absolute URLs.

    Args:
        page: Playwright page already navigated to a list URL.
        category: human-readable category label stored with each link.

    Returns:
        list of dicts with keys 'title', 'href', 'index', 'category'.
    """
    li_elements = page.query_selector_all('xpath=/html/body/div[7]/div[1]/div[2]/ul/li')
    print(f"在 {category} 中找到 {len(li_elements)} 个新闻条目")

    news_links = []
    for i, li in enumerate(li_elements):
        try:
            link_element = li.query_selector('a.news-link')
            if not link_element:
                continue
            # Prefer the title attribute; fall back to the anchor's text.
            title = link_element.get_attribute('title') or link_element.text_content().strip()
            href = link_element.get_attribute('href')
            if not href:
                continue
            # Normalize to an absolute URL.
            if href.startswith('//'):
                href = 'http:' + href
            elif not href.startswith('http'):
                href = 'http://news.10jqka.com.cn' + href
            news_links.append({
                'title': title,
                'href': href,
                'index': i,
                'category': category
            })
        except Exception as e:
            print(f"处理第 {i+1} 个li元素时出错: {e}")
            continue
    return news_links


def _scrape_article(browser, news, save_dir):
    """Open one article in a fresh page, archive its HTML, extract text.

    The page is always closed in ``finally`` — the original code leaked the
    page object when an exception was raised mid-scrape.

    Args:
        browser: Playwright browser used to open a new page.
        news: link dict produced by ``_collect_news_links``.
        save_dir: directory the article HTML is written into.

    Returns:
        dict describing the scraped article (title, content, file, etc.).

    Raises:
        Propagates any Playwright/IO error; the caller decides whether to
        continue with the next article.
    """
    category = news['category']
    news_page = browser.new_page()
    try:
        news_page.goto(news['href'], timeout=60000)
        news_page.wait_for_load_state('networkidle')

        # Give late JS-rendered content a moment to settle.
        time.sleep(2)

        # Archive the raw article HTML under a filesystem-safe name.
        news_html = news_page.content()
        safe_title = "".join(c for c in news['title'] if c.isalnum() or c in (' ', '-', '_')).rstrip()
        safe_title = safe_title[:50]  # cap filename length
        news_html_filename = f"{save_dir}/{category}_{safe_title}.html"
        with open(news_html_filename, 'w', encoding='utf-8') as f:
            f.write(news_html)

        # Title and body live at fixed XPaths on the article template;
        # fall back to placeholder strings when the layout differs.
        title_element = news_page.query_selector('xpath=/html/body/div[4]/div[1]/h2')
        title_text = title_element.text_content().strip() if title_element else "未获取到标题"

        content_element = news_page.query_selector('xpath=/html/body/div[4]/div[1]/div[4]')
        content_text = content_element.text_content().strip() if content_element else "未获取到正文"

        news_data = {
            'category': category,
            'original_title': news['title'],
            'extracted_title': title_text,
            'url': news['href'],
            'content': content_text,
            'content_length': len(content_text),
            'html_file': news_html_filename,
            'crawl_time': datetime.now().isoformat()
        }

        print(f"标题: {title_text}")
        preview = content_text[:200] + "..." if len(content_text) > 200 else content_text
        print(f"正文预览: {preview}")
        print(f"正文长度: {len(content_text)} 字符")
        print(f"已保存HTML到: {news_html_filename}")

        return news_data
    finally:
        # Always release the page, even on error (fixes a page leak).
        news_page.close()


def crawl_news():
    """Crawl three 10jqka news list pages and archive every linked article.

    For each category list page: saves the list HTML, extracts article
    links, then visits each article to save its HTML and extract the
    title/body text. All results are written into a timestamped directory
    ``news_data_<ts>/`` along with ``all_news_data.json`` and a plain-text
    summary report.

    Side effects: launches a headless Chromium browser, performs network
    requests, creates files on disk, prints progress to stdout.
    """
    # List pages to crawl, with their human-readable category labels.
    urls = [
        "http://news.10jqka.com.cn/today_list/",
        "http://news.10jqka.com.cn/cjzx_list/", 
        "http://news.10jqka.com.cn/guojicj_list/"
    ]

    url_names = {
        "http://news.10jqka.com.cn/today_list/": "今日要闻",
        "http://news.10jqka.com.cn/cjzx_list/": "财经资讯",
        "http://news.10jqka.com.cn/guojicj_list/": "国际财经"
    }

    # One fresh output directory per run, keyed by start time.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    save_dir = f"news_data_{timestamp}"
    os.makedirs(save_dir, exist_ok=True)

    all_news_data = []

    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True)

        for url in urls:
            page = browser.new_page()
            category = url_names.get(url, "未知分类")

            try:
                print(f"\n{'='*60}")
                print(f"正在访问: {category} - {url}")

                page.goto(url, timeout=60000)
                page.wait_for_load_state('networkidle')

                # Give late JS-rendered content a moment to settle.
                time.sleep(3)

                # Archive the full list-page HTML.
                html_content = page.content()
                html_filename = f"{save_dir}/{category}_列表页.html"
                with open(html_filename, 'w', encoding='utf-8') as f:
                    f.write(html_content)
                print(f"已保存HTML内容到: {html_filename}")

                news_links = _collect_news_links(page, category)

                for news in news_links:
                    try:
                        print(f"\n处理新闻: {news['title']}")
                        all_news_data.append(_scrape_article(browser, news, save_dir))
                    except Exception as e:
                        print(f"处理新闻链接时出错: {e}")
                        continue

                    # Throttle between articles to avoid hammering the site
                    # (skipped after a failed article, same as the original).
                    time.sleep(1)

            except Exception as e:
                print(f"处理 {category} 页面时出错: {e}")

            finally:
                page.close()

        # Persist everything scraped this run as one JSON document.
        json_filename = f"{save_dir}/all_news_data.json"
        with open(json_filename, 'w', encoding='utf-8') as f:
            json.dump(all_news_data, f, ensure_ascii=False, indent=2)
        print(f"\n所有数据已保存到: {json_filename}")

        generate_summary_report(all_news_data, save_dir)

        browser.close()

def generate_summary_report(news_data, save_dir):
    """Write a plain-text crawl summary to ``<save_dir>/抓取报告.txt``.

    The report contains the crawl time, totals, per-category article
    counts, and a numbered list of every article with its URL and length.

    Args:
        news_data: list of article dicts; each must have 'category',
            'extracted_title', 'url' and 'content_length' keys.
        save_dir: existing directory the report file is written into.

    Side effects: creates/overwrites the report file and prints its path.
    """
    report_filename = f"{save_dir}/抓取报告.txt"

    total_articles = len(news_data)
    # Idiomatic aggregation instead of the original manual running totals.
    total_content_length = sum(article['content_length'] for article in news_data)
    category_count = {}
    for article in news_data:
        category_count[article['category']] = category_count.get(article['category'], 0) + 1

    with open(report_filename, 'w', encoding='utf-8') as f:
        f.write("新闻抓取汇总报告\n")
        f.write("=" * 50 + "\n")
        f.write(f"抓取时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"总文章数: {total_articles}\n")
        f.write(f"总字符数: {total_content_length}\n\n")

        f.write("各分类统计:\n")
        for category, count in category_count.items():
            f.write(f"  {category}: {count} 篇文章\n")

        f.write("\n文章列表:\n")
        for i, article in enumerate(news_data, 1):
            f.write(f"{i}. [{article['category']}] {article['extracted_title']}\n")
            f.write(f"   链接: {article['url']}\n")
            f.write(f"   长度: {article['content_length']} 字符\n\n")

    print(f"汇总报告已生成: {report_filename}")

if __name__ == "__main__":
    crawl_news()
