import random
import time
import requests
from bs4 import BeautifulSoup
import re
import json
import os

def get_url_list(url, timeout=10):
    """Fetch a listing page and extract article metadata from its embedded
    ``docArr`` JavaScript array.

    Args:
        url: Listing-page URL to fetch.
        timeout: Seconds before the HTTP request is aborted. The original
            code had no timeout, so a stalled connection hung forever.

    Returns:
        dict mapping article id -> {'url', 'title', 'content', 'pubtime'}.
    """
    response = requests.get(url, timeout=timeout)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    url_list = {}
    # The page embeds its article list as a JS literal: var docArr = [ ... ];
    # find the script tag that declares it and parse the array as JSON.
    for script in soup.find_all('script'):
        if 'var docArr' not in script.text:
            continue
        # Non-greedy match stops at the first ']' — assumes the array holds
        # flat objects with no ']' inside string values (holds for this site).
        match = re.search(r'var docArr\s*=\s*(\[.*?\])', script.text, re.DOTALL)
        if not match:
            continue
        for item in json.loads(match.group(1)):
            url_list[item['id']] = {
                'url': item['url'],
                'title': item['title'],
                'content': item['content'],
                'pubtime': item['pubtime'],
            }
    return url_list

def get_news_content(url, timeout=10):
    """Download one article page and return its main body text.

    Args:
        url: Article URL to fetch.
        timeout: Seconds before the HTTP request is aborted (the original
            call had no timeout and could hang indefinitely).

    Returns:
        Text content of the first ``.left_zw`` element.

    Raises:
        ValueError: If the page contains no ``.left_zw`` element
            (previously this surfaced as an opaque IndexError).
    """
    response = requests.get(url, timeout=timeout)
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    nodes = soup.select(".left_zw")
    if not nodes:
        raise ValueError(f"no .left_zw content found at {url}")
    text = nodes[0].get_text()
    print('have get', url)
    # Polite random delay of 0.5–1.5 s between requests so we do not
    # hammer the server (same range as the original arithmetic).
    time.sleep(random.uniform(0.5, 1.5))
    return text

def get_row_urls(first_page=1, last_page=19):
    """Build the list of paginated listing URLs to crawl.

    Args:
        first_page: First page number, inclusive (default 1).
        last_page: Last page number, inclusive (default 19 — matches the
            original hard-coded ``range(1, 20)``).

    Returns:
        List of listing-page URLs, one per page number.
    """
    return [
        f"https://channel.chinanews.com.cn/cns/cl/gj-zxsjg.shtml?pager={i}"
        for i in range(first_page, last_page + 1)
    ]

def process_and_save_batch(batch_data, output_file):
    """Merge ``batch_data`` into the JSON object stored at ``output_file``.

    The original implementation first created an empty JSON file when
    missing and then re-read it — three file opens on the first call and
    an exists/open race. EAFP handling of FileNotFoundError does the same
    job with a single read.

    Args:
        batch_data: dict of new entries; keys overwrite existing ones.
        output_file: Path of the JSON file to update (created if absent).
    """
    # Load whatever is already on disk; a missing file means "no data yet".
    try:
        with open(output_file, 'r', encoding='utf-8') as f:
            existing_data = json.load(f)
    except FileNotFoundError:
        existing_data = {}

    existing_data.update(batch_data)

    # Write the merged result back atomically from the caller's viewpoint.
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(existing_data, f, ensure_ascii=False, indent=4)

def main():
    """Crawl every listing page, fetch each article's body text, and
    persist the results incrementally to a JSON file.

    Each article is saved as soon as it is fetched, so an interruption
    loses at most the article currently in flight.
    """
    output_file = './url_list.json'
    processed = 0

    for page_url in get_row_urls():
        # Article metadata published on this listing page.
        page_items = get_url_list(page_url)

        for news_id, news_info in page_items.items():
            processed += 1
            print(f"Processing item {processed}")

            try:
                news_info['text'] = get_news_content(news_info['url'])
                # Save immediately rather than batching the whole run.
                process_and_save_batch({news_id: news_info}, output_file)
            except Exception as e:
                # Log and move on — one bad article must not stop the crawl.
                print(f"Error processing {news_info['url']}: {str(e)}")
                continue

# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()

