import requests
import json
import re
from datetime import datetime, timedelta
import mysql.connector
from mysql.connector import Error
import time
import hashlib
import threading
import queue
from concurrent.futures import ThreadPoolExecutor

def connect_to_mysql():
    """Open a connection to the local `xinwen` MySQL database.

    Returns:
        mysql.connector connection on success, None on any failure
        (a message is printed in both failure paths).
    """
    try:
        # NOTE(review): credentials are hard-coded — consider moving them
        # to environment variables or a config file.
        conn = mysql.connector.connect(
            host='localhost',
            database='xinwen',
            user='xinwen',
            password='fTnRHaMB76dxW5HG'
        )
        if conn.is_connected():
            return conn
        print("数据库连接失败")
        return None
    except Error as e:
        print(f"连接数据库时出错: {e}")
        return None

def create_news_table_if_not_exists():
    """Create the `news` table if it is not already present.

    Returns:
        bool: True when the table exists (or was just created), False on
        connection or DDL failure.
    """
    connection = connect_to_mysql()
    if not connection:
        return False

    # Bind cursor up front: the original referenced `cursor` in `finally`,
    # which raised NameError if connection.cursor() itself failed.
    cursor = None
    try:
        cursor = connection.cursor()

        # DDL for the news table (utf8mb4 so titles/summaries with any
        # Unicode, including emoji, store correctly).
        create_table_query = """
        CREATE TABLE IF NOT EXISTS news (
            id INT AUTO_INCREMENT PRIMARY KEY,
            title VARCHAR(255) NOT NULL,
            url VARCHAR(512) NOT NULL,
            summary TEXT,
            publish_date VARCHAR(50),
            source_site VARCHAR(100),
            issuing_agency VARCHAR(100),
            topic_category VARCHAR(255),
            attachments TEXT,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
        """

        cursor.execute(create_table_query)
        connection.commit()
        print("已确保news表存在")
        return True

    except Error as e:
        print(f"创建表时出错: {e}")
        return False
    finally:
        if connection.is_connected():
            if cursor is not None:
                cursor.close()
            connection.close()

def save_to_mysql(news_data):
    """Insert a batch of parsed news dicts into the `news` table.

    Args:
        news_data: list of dicts as produced by parse_news_data_from_dict
            (keys: title, url, summary, publication_date, source,
            issuing_agency, topic_category, attachments).

    Returns:
        bool: True when all rows committed, False on any failure.
    """
    # Ensure the target table exists before inserting.
    if not create_news_table_if_not_exists():
        return False

    connection = connect_to_mysql()
    if not connection:
        return False

    # Bind cursor up front: the original referenced `cursor` in `finally`,
    # which raised NameError if connection.cursor() itself failed.
    cursor = None
    try:
        cursor = connection.cursor()

        insert_query = """
        INSERT INTO news (title, url, summary, publish_date, source_site, issuing_agency, topic_category, attachments) 
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        """

        # Build all parameter tuples, then insert in one executemany call
        # instead of a per-row execute loop.
        rows = [
            (
                news.get('title'),
                news.get('url'),
                news.get('summary'),
                news.get('publication_date'),
                news.get('source'),
                news.get('issuing_agency'),
                news.get('topic_category'),
                # attachments is a dict (or None); serialize to JSON text.
                json.dumps(news.get('attachments')) if news.get('attachments') else None,
            )
            for news in news_data
        ]
        cursor.executemany(insert_query, rows)

        connection.commit()
        print(f"成功保存 {len(news_data)} 条新闻到数据库")
        return True

    except Error as e:
        print(f"保存数据时出错: {e}")
        return False
    finally:
        if connection.is_connected():
            if cursor is not None:
                cursor.close()
            connection.close()

def connect_to_redis():
    """Connect to the local Redis server (db 2) used for URL dedup.

    Returns:
        redis.Redis client on success, None on any failure
        (decode_responses=True so commands return str, not bytes).
    """
    try:
        import redis

        client = redis.Redis(
            host='localhost',
            port=6379,
            password='Mn4940128',
            db=2,
            decode_responses=True
        )
        # ping() confirms the server is actually reachable.
        if not client.ping():
            print("Redis连接失败")
            return None
        return client
    except Exception as e:
        print(f"连接Redis时出错: {e}")
        return None

def generate_url_hash(url):
    """Return the 32-char hex MD5 digest of the UTF-8 encoded URL (dedup key)."""
    digest = hashlib.md5(url.encode('utf-8'))
    return digest.hexdigest()

def check_news_exists(url):
    """Return True if this URL's hash is already in the Redis 'news_urls' set.

    Any Redis failure is treated as "not seen" so the crawl degrades to
    no deduplication instead of dropping news.
    """
    client = connect_to_redis()
    if client is None:
        return False

    try:
        # All URL hashes live in a single set keyed 'news_urls'.
        return client.sismember('news_urls', generate_url_hash(url))
    except Exception as e:
        print(f"检查新闻链接时出错: {e}")
        return False

def add_news_url(url):
    """Record a URL's hash in the Redis 'news_urls' set for future dedup.

    Returns:
        bool: True when the hash was sent to Redis, False on any failure.
    """
    client = connect_to_redis()
    if client is None:
        return False

    try:
        client.sadd('news_urls', generate_url_hash(url))
    except Exception as e:
        print(f"添加新闻链接到Redis时出错: {e}")
        return False
    return True

def filter_duplicate_news(news_data):
    """Drop news items whose URL has already been seen (tracked in Redis).

    Fix: the original opened two fresh Redis connections per item (one in
    check_news_exists, one in add_news_url). This connects once and uses
    SADD's return value (1 = newly added, 0 = already present) as an atomic
    test-and-add in a single round trip per item.

    Items without a 'url' are dropped, matching the original behavior; when
    Redis is unavailable, every item with a URL is kept (no dedup).
    """
    client = connect_to_redis()

    unique_news = []
    for news in news_data:
        url = news.get('url')
        if not url:
            continue

        is_new = True
        if client is not None:
            try:
                is_new = bool(client.sadd('news_urls', generate_url_hash(url)))
            except Exception as e:
                # Treat Redis errors as "not seen", like the original did.
                print(f"检查新闻链接时出错: {e}")
        if is_new:
            unique_news.append(news)

    filtered_count = len(news_data) - len(unique_news)
    if filtered_count > 0:
        print(f"已过滤 {filtered_count} 条重复新闻")

    return unique_news

def clean_html(raw_html):
    """Strip HTML tags (e.g. search-highlight <em></em>) from a string.

    None passes through unchanged. Note: '.' does not match newlines here,
    so only single-line tags are removed, as in the original.
    """
    if raw_html is None:
        return None
    return re.sub('<.*?>', '', raw_html)

# Accepts an already-decoded JSON payload (dict) rather than raw response text.
def _extract_format_row_fields(format_rows):
    """Extract issuing agency, topic category and attachments from formatRows.

    Returns:
        tuple: (issuing_agency, topic_category, attachments); each element is
        None when the corresponding field is absent or malformed.
    """
    issuing_agency = None
    topic_category = None
    attachments = None

    if format_rows and isinstance(format_rows, list):
        for row_item in format_rows:
            cols = row_item.get("col")
            if not (cols and isinstance(cols, list)):
                continue
            for col_item in cols:
                field_text = col_item.get("text")
                field_value = col_item.get("value")

                if field_text == "发文机构":
                    # May arrive as a list or a plain string.
                    if isinstance(field_value, list) and field_value:
                        issuing_agency = field_value[0]
                    elif isinstance(field_value, str):
                        issuing_agency = field_value
                elif field_text == "主题分类":
                    if isinstance(field_value, list) and field_value:
                        # Join non-empty category strings into one display value.
                        topic_category = ", ".join(filter(None, field_value))
                    elif isinstance(field_value, str):
                        topic_category = field_value
                elif field_text == "相关附件":
                    if isinstance(field_value, dict):
                        attachments = field_value

    return issuing_agency, topic_category, attachments


def parse_news_data_from_dict(data):
    """Parse the search-API response dict into a list of news records.

    Args:
        data: decoded JSON response; expected shape is {"ok": truthy,
            "resultDocs": [{"data": {...}}, ...]}.

    Returns:
        list[dict]: one record per doc with keys index, title, url, summary,
        publication_date, source, issuing_agency, topic_category,
        attachments. Empty list when the input is not a dict or the
        structure is unexpected (a diagnostic is printed).
    """
    if not isinstance(data, dict):
        print("错误: 输入的不是有效的字典格式。")
        return []

    # Guard clause for the expected top-level structure.
    if not (data.get("ok") and "resultDocs" in data and isinstance(data["resultDocs"], list)):
        print("错误: JSON 结构不符合预期。未找到 'resultDocs' 列表或 'ok' 不为 true。")
        # Dump a truncated view of what we actually received, for debugging.
        print("收到的数据结构:", json.dumps(data, indent=2, ensure_ascii=False)[:500] + "...")
        return []

    extracted_news = []
    for index, doc in enumerate(data["resultDocs"]):
        doc_data = doc.get("data", {})
        if not doc_data:
            print(f"警告: 第 {index+1} 条记录缺少 'data' 字段，已跳过。")
            continue

        site_label_info = doc_data.get("siteLabel", {})
        issuing_agency, topic_category, attachments = _extract_format_row_fields(
            doc_data.get("formatRows"))

        extracted_news.append({
            "index": index + 1,
            "title": doc_data.get("titleO"),  # original (un-highlighted) title
            "url": doc_data.get("url"),
            "summary": clean_html(doc_data.get("summary")),  # strip <em> highlight tags
            "publication_date": doc_data.get("docDate"),
            "source": site_label_info.get("value") if isinstance(site_label_info, dict) else None,
            "issuing_agency": issuing_agency,
            "topic_category": topic_category,
            "attachments": attachments,
        })

    return extracted_news

def fetch_news_page(date_str, page, page_size=50):
    """Fetch and parse one page of search results for the given date.

    Args:
        date_str: date in "YYYY-MM-DD" form (used as both start and end).
        page: 1-based page number.
        page_size: results per page (default 50).

    Returns:
        list[dict]: deduplicated news records for this page; empty list on
        any request, JSON or parse failure (diagnostics are printed).
    """
    url = 'https://www.beijing.gov.cn/so/ss/query/s'

    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Origin': 'https://www.beijing.gov.cn',
        'Referer': 'https://www.beijing.gov.cn/so/s?tab=all&siteCode=1100000088',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'X-Requested-With': 'XMLHttpRequest'
    }

    # Form payload mirroring the site's own advanced-search request.
    data_payload = {
        'tab': 'all',
        'siteCode': '1100000088',
        'qt': '("的")',
        'fileType': '',
        'timeOption': '2',
        'sort': 'dateDesc',
        'keyPlace': '0',
        'startDateStr': date_str,
        'endDateStr': date_str,
        'adv': '1',
        'locationCode': '110000000000',
        'page': str(page),
        'pageSize': str(page_size),
        'ie': '061dfcc5-6599-4ab8-8379-d06539e883ce'
    }

    print(f"正在获取 {date_str} 的第 {page} 页数据...")

    try:
        # Fix: the original created a Session and never closed it, leaking
        # its connection pool on every call; the context manager closes it.
        with requests.Session() as session:
            response = session.post(url, headers=headers, data=data_payload, timeout=20)
        response.raise_for_status()

        print(f"响应状态码: {response.status_code}, 内容长度: {len(response.text)}")

        try:
            response_json = response.json()

            if "resultDocs" in response_json:
                print(f"resultDocs包含 {len(response_json['resultDocs'])} 条记录")
            else:
                print("警告: 响应中没有找到resultDocs字段")
                print("响应内容片段:", response.text[:200] + "...")

            news_list = parse_news_data_from_dict(response_json)
            if not news_list:
                print(f"第 {page} 页没有获取到新闻数据。")
                print("响应内容片段:", response.text[:500] + "...")
                return []

            # Drop items already seen (Redis-backed dedup).
            unique_news = filter_duplicate_news(news_list)
            print(f"第 {page} 页成功获取 {len(unique_news)} 条新闻（过滤重复后）")
            return unique_news

        except json.JSONDecodeError as je:
            print(f"JSON解析错误: {je}")
            print("响应内容片段:", response.text[:500] + "...")
            return []

    except Exception as e:
        print(f"获取第 {page} 页时出错: {e}")
        return []

def fetch_news_by_date(date_str, max_pages=25, page_size=50, max_workers=5):
    """Collect all news for one date by fetching pages 1..max_pages in parallel.

    Fix: the original routed results through a Queue even though
    ThreadPoolExecutor.map already returns each worker's result; the
    queue plumbing is unnecessary and is removed here.

    Args:
        date_str: date in "YYYY-MM-DD" form.
        max_pages: highest page number to request.
        page_size: results per page.
        max_workers: concurrent page-fetch threads.

    Returns:
        list[dict]: all (deduplicated) news records collected for the date.
    """
    def worker(page):
        # Fetch one page; brief pause to avoid hammering the server.
        news_list = fetch_news_page(date_str, page, page_size)
        time.sleep(1)
        return news_list

    all_news = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for page_news in executor.map(worker, range(1, max_pages + 1)):
            if page_news:
                all_news.extend(page_news)

    print(f"{date_str} 总共采集到 {len(all_news)} 条新闻")
    return all_news

def crawl_date_range(start_date, end_date, max_workers=5):
    """Crawl every date in [start_date, end_date], processing dates in parallel.

    Fix: replaces the hand-rolled thread pool (Queue + Lock + manual Thread
    start/join) with ThreadPoolExecutor.map, which distributes the dates and
    returns per-date results without shared mutable state.

    Args:
        start_date: first date (datetime.date), inclusive.
        end_date: last date (datetime.date), inclusive.
        max_workers: number of dates processed concurrently.

    Returns:
        list[dict]: all news collected across the range (each day is also
        saved to MySQL as it completes).
    """
    # Materialize the inclusive date range.
    dates = []
    current_date = start_date
    while current_date <= end_date:
        dates.append(current_date)
        current_date += timedelta(days=1)

    def process_date(date):
        """Fetch, persist and return one day's news; [] on failure."""
        try:
            date_str = date.strftime("%Y-%m-%d")
            print(f"\n===== 开始采集 {date_str} 的新闻 =====")

            news_for_day = fetch_news_by_date(date_str, max_pages=25, max_workers=3)

            if news_for_day:
                save_to_mysql(news_for_day)
                return news_for_day
        except Exception as e:
            print(f"处理日期时出错: {e}")
        return []

    all_collected_news = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        for day_news in executor.map(process_date, dates):
            all_collected_news.extend(day_news)

    return all_collected_news

# --- Script entry point ---
if __name__ == "__main__":
    # Crawl only today's news (start and end of the range are both today).
    today = datetime.now().date()
    start_date = today
    end_date = today

    print(f"开始采集从 {start_date} 到 {end_date} 的新闻...")
    all_news = crawl_date_range(start_date, end_date, max_workers=10)
    print(f"\n采集完成，总共获取了 {len(all_news)} 条新闻。")