from playwright.sync_api import sync_playwright
import time

def crawl_cls_news(headless=False, max_items=13):
    """Crawl the CLS depth-news list page and print each article's paragraphs.

    Opens https://www.cls.cn/depth?id=1000, collects article links from the
    first ``max_items`` list entries, then visits each article in its own
    page and prints every non-empty paragraph of the body.

    Args:
        headless: launch the browser without a visible window when True.
        max_items: number of list entries to scan for article links
            (defaults to 13, matching the page's visible list length).
    """
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=headless)
        page = browser.new_page()

        try:
            print("正在访问列表页面...")
            page.goto("https://www.cls.cn/depth?id=1000", timeout=60000)
            page.wait_for_load_state('networkidle')
            # Extra settle time: the list is rendered by client-side JS
            # after the network goes idle.
            time.sleep(3)

            news_links = _collect_news_links(page, max_items)
            print(f"\n共找到 {len(news_links)} 个新闻链接")

            for idx, link in enumerate(news_links, 1):
                print(f"\n{'='*60}")
                print(f"正在处理第 {idx} 篇文章: {link}")
                print(f"{'='*60}")
                try:
                    _print_article(browser, link)
                except Exception as e:
                    print(f"处理第 {idx} 篇文章时出错: {e}")
                    continue
                # Throttle between articles to avoid hammering the site.
                time.sleep(1)

        except Exception as e:
            print(f"爬取过程中出错: {e}")
        finally:
            browser.close()
            print("\n爬取完成！")


def _collect_news_links(page, max_items):
    """Return absolute article URLs from the first max_items list entries.

    Failures on individual entries are reported and skipped so one broken
    entry does not abort the whole crawl.
    """
    news_links = []
    for i in range(1, max_items + 1):
        xpath = f'//*[@id="__next"]/div/div[2]/div[2]/div[2]/div[3]/div[{i}]'
        try:
            element = page.wait_for_selector(f'xpath={xpath}', timeout=10000)
            link_element = element.query_selector('a')
            if link_element:
                href = link_element.get_attribute('href')
                if href:
                    full_link = f"https://www.cls.cn{href}"
                    news_links.append(full_link)
                    print(f"找到链接 {i}: {full_link}")
        except Exception as e:
            print(f"获取第 {i} 个元素失败: {e}")
            continue
    return news_links


def _print_article(browser, link):
    """Open *link* in a fresh page and print each non-empty body paragraph.

    The page is always closed, even when navigation or extraction raises —
    the original code leaked the page on any exception.
    """
    new_page = browser.new_page()
    try:
        new_page.goto(link, timeout=60000)
        new_page.wait_for_load_state('networkidle')
        # Let the client-side rendering finish before querying content.
        time.sleep(2)

        content_xpath = '//*[@id="__next"]/div/div[2]/div[2]/div[1]/div[3]/div'
        content_element = new_page.wait_for_selector(f'xpath={content_xpath}', timeout=10000)

        if content_element:
            paragraphs = content_element.query_selector_all('p')
            if paragraphs:
                print(f"\n文章内容 (共 {len(paragraphs)} 段):\n")
                for p_idx, p in enumerate(paragraphs, 1):
                    text = p.inner_text().strip()
                    if text:  # skip blank paragraphs
                        print(f"{p_idx}. {text}")
            else:
                print("未找到段落内容")
        else:
            print("未找到内容元素")
    finally:
        # Always release the page so failed articles don't leak tabs.
        new_page.close()

if __name__ == "__main__":
    # Entry-point guard: run the crawler only when executed as a script,
    # not when this module is imported.
    crawl_cls_news()
