#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import os
import pymysql
from datetime import datetime
from tabulate import tabulate

# 添加项目根目录到路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from news_crawler.config import MYSQL_CONFIG

def query_beijing_news():
    """
    Query today's news from every Beijing-related table.

    Discovers all tables in the configured database whose name contains
    "beijing", collects rows whose publish_time falls on the current date,
    prints per-table counts and a combined grid, then optionally exports
    the combined result to CSV.
    """
    conn = pymysql.connect(
        host=MYSQL_CONFIG['host'],
        user=MYSQL_CONFIG['user'],
        password=MYSQL_CONFIG['password'],
        database=MYSQL_CONFIG['database'],
        charset='utf8mb4'
    )
    
    try:
        with conn.cursor() as cursor:
            # Discover all tables whose name contains "beijing".
            cursor.execute("""
                SELECT table_name 
                FROM information_schema.tables 
                WHERE table_schema = %s AND table_name LIKE %s
            """, (MYSQL_CONFIG['database'], '%beijing%'))
            
            tables = [row[0] for row in cursor.fetchall()]
            
            if not tables:
                print("未找到北京相关的新闻表")
                return
            
            print(f"找到以下北京相关表: {', '.join(tables)}")
            
            all_results = []
            table_counts = {}
            # Defined before any branching so it is always in scope for
            # the export prompt at the bottom.
            headers = ["来源表", "ID", "标题", "内容", "URL", "发布时间", "爬取时间"]
            
            # Query today's news from each table. Table names cannot be
            # bound as SQL parameters, so they are interpolated; quote
            # them with backticks (the names come from information_schema,
            # not user input, but quoting protects against odd names).
            for table in tables:
                sql = f"""
                SELECT 
                    '{table}' AS source_table,
                    id,
                    title,
                    content,
                    url,
                    publish_time,
                    crawl_time
                FROM 
                    `{table}`
                WHERE 
                    DATE(publish_time) = CURDATE()
                """
                cursor.execute(sql)
                results = cursor.fetchall()
                
                table_counts[table] = len(results)
                all_results.extend(results)
            
            # Newest first (column 5 is publish_time).
            all_results.sort(key=lambda x: x[5], reverse=True)
            
            # Per-table statistics.
            print("\n== 各表统计 ==")
            for table, count in table_counts.items():
                print(f"{table}: {count}条新闻")
            
            if all_results:
                print("\n== 北京相关表的当日新闻 ==")
                print(tabulate(all_results, headers=headers, tablefmt="grid"))
                print(f"\n总计: {len(all_results)} 条当日新闻")
            else:
                print("\n今日无北京相关新闻")
                
            # Optional CSV export.
            if all_results and input("\n是否导出到CSV文件? (y/n): ").lower() == 'y':
                export_to_csv(all_results, headers)
    
    finally:
        conn.close()

def export_to_csv(results, headers):
    """
    Export query results to a timestamped CSV file in the current directory.

    :param results: iterable of row tuples/lists to write
    :param headers: column header strings written as the first row
    """
    import csv
    from datetime import datetime
    
    # Timestamped name avoids clobbering earlier exports.
    filename = f"beijing_news_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv"
    
    try:
        # utf-8-sig writes a BOM so Excel detects the encoding correctly.
        with open(filename, 'w', newline='', encoding='utf-8-sig') as f:
            writer = csv.writer(f)
            writer.writerow(headers)
            writer.writerows(results)
        # Bug fix: the success message previously printed the literal
        # placeholder "(unknown)" instead of the actual file name.
        print(f"数据已导出到 {filename}")
    except Exception as e:
        print(f"导出失败: {str(e)}")

def query_beijing_news_by_date(date_str):
    """
    Query Beijing-related news published on a specific date.

    Mirrors query_beijing_news() but filters on the supplied date instead
    of CURDATE().

    :param date_str: date string in YYYY-MM-DD format
    """
    try:
        # Validate the format up front; the parsed value itself is not
        # needed because the raw string is bound as an SQL parameter below.
        datetime.strptime(date_str, '%Y-%m-%d')
    except ValueError:
        print("日期格式错误，请使用YYYY-MM-DD格式")
        return
    
    conn = pymysql.connect(
        host=MYSQL_CONFIG['host'],
        user=MYSQL_CONFIG['user'],
        password=MYSQL_CONFIG['password'],
        database=MYSQL_CONFIG['database'],
        charset='utf8mb4'
    )
    
    try:
        with conn.cursor() as cursor:
            # Discover all tables whose name contains "beijing".
            cursor.execute("""
                SELECT table_name 
                FROM information_schema.tables 
                WHERE table_schema = %s AND table_name LIKE %s
            """, (MYSQL_CONFIG['database'], '%beijing%'))
            
            tables = [row[0] for row in cursor.fetchall()]
            
            if not tables:
                print("未找到北京相关的新闻表")
                return
            
            print(f"找到以下北京相关表: {', '.join(tables)}")
            
            all_results = []
            table_counts = {}
            # Defined before any branching so it is always in scope for
            # the export prompt at the bottom.
            headers = ["来源表", "ID", "标题", "内容", "URL", "发布时间", "爬取时间"]
            
            # Query each table for the requested date. Table names cannot
            # be bound as SQL parameters, so they are interpolated; quote
            # them with backticks. The date IS bound as a parameter.
            for table in tables:
                sql = f"""
                SELECT 
                    '{table}' AS source_table,
                    id,
                    title,
                    content,
                    url,
                    publish_time,
                    crawl_time
                FROM 
                    `{table}`
                WHERE 
                    DATE(publish_time) = %s
                """
                cursor.execute(sql, (date_str,))
                results = cursor.fetchall()
                
                table_counts[table] = len(results)
                all_results.extend(results)
            
            # Newest first (column 5 is publish_time).
            all_results.sort(key=lambda x: x[5], reverse=True)
            
            # Per-table statistics.
            print(f"\n== {date_str} 各表统计 ==")
            for table, count in table_counts.items():
                print(f"{table}: {count}条新闻")
            
            if all_results:
                print(f"\n== {date_str} 北京相关表的新闻 ==")
                print(tabulate(all_results, headers=headers, tablefmt="grid"))
                print(f"\n总计: {len(all_results)} 条新闻")
            else:
                print(f"\n{date_str} 无北京相关新闻")
                
            # Optional CSV export.
            if all_results and input("\n是否导出到CSV文件? (y/n): ").lower() == 'y':
                export_to_csv(all_results, headers)
    
    finally:
        conn.close()

if __name__ == "__main__":
    import argparse
    
    # CLI entry point: query today's news by default, or a specific day
    # when -d/--date is supplied.
    arg_parser = argparse.ArgumentParser(description='查询北京相关表的新闻')
    arg_parser.add_argument(
        '-d', '--date',
        help='指定查询日期，格式为YYYY-MM-DD，默认为当天'
    )
    cli_args = arg_parser.parse_args()
    
    if not cli_args.date:
        query_beijing_news()
    else:
        query_beijing_news_by_date(cli_args.date)