import time
import requests
from lxml import etree
import json
import clickhouse_connect
from datetime import datetime
from dateutil.parser import parse
import schedule

# Module-level ClickHouse client shared by every function below.
# NOTE(review): host/port are hard-coded — consider moving to env vars/config.
client = clickhouse_connect.get_client(host='192.168.31.54', port=8123)


def check_and_create_table():
    """Ensure the target table ``test.cnbc_table`` exists.

    Idempotent: uses CREATE TABLE IF NOT EXISTS, so it is safe to call
    at the start of every scrape cycle.
    """
    ddl = """
    CREATE TABLE IF NOT EXISTS test.cnbc_table
    (
        title String,
        date_time DateTime,
        main_content String,
        data_url String,
        data_type String,
        eyebrow String,
        sectionLabel String,
        description String,
        PRIMARY KEY (title, date_time)
    )
    ENGINE = MergeTree()
    ORDER BY (title, date_time)
    """
    client.command(ddl)
    print("已确认表test.cnbc_table存在")


def parse_datetime(dt_str):
    """Parse a date/time string into a ``datetime``; return None on failure.

    Strings ending in a literal ``+0000`` offset are normalized to the
    ISO-style ``Z`` suffix before being handed to dateutil's parser.
    Empty/None input yields None without logging.
    """
    if not dt_str:
        return None

    # Normalize a trailing "+0000" offset to "Z" so the parser sees ISO form.
    normalized = dt_str[:-5] + 'Z' if dt_str.endswith('+0000') else dt_str
    try:
        return parse(normalized)
    except Exception as e:
        # Best-effort: log and signal failure rather than abort the scrape.
        print(f"日期时间解析错误: {e}, 原始值: {normalized}")
        return None


def exists_in_db(title, date_time):
    """Return True if a row with this (title, date_time) pair is already stored.

    Uses parameterized binding to avoid SQL injection from scraped titles.
    """
    sql = """
    SELECT COUNT(*) 
    FROM test.cnbc_table 
    WHERE title = %(title)s 
    AND date_time = %(date_time)s
    """
    params = {'title': title, 'date_time': date_time}
    count = client.query(sql, parameters=params).result_rows[0][0]
    return count > 0


def save_data(title, date_time, main_content, data_url, data_type, eyebrow, sectionLabel, description):
    """Insert one article row into ClickHouse.

    Returns True only when a brand-new row was inserted; False when the
    timestamp cannot be parsed, the row already exists, or the insert fails.
    """
    # Normalize the raw timestamp string first — no point deduping bad rows.
    dt_obj = parse_datetime(date_time)
    if dt_obj is None:
        print(f"无法解析日期时间，跳过此条数据: {date_time}")
        return False

    # Skip rows already stored under the same (title, timestamp) key.
    if exists_in_db(title, dt_obj):
        print(f"数据已存在，跳过: {title[:30]}... (发布于 {dt_obj})")
        return False

    columns = ['title', 'date_time', 'main_content', 'data_url',
               'data_type', 'eyebrow', 'sectionLabel', 'description']
    values = [title, dt_obj, main_content, data_url,
              data_type, eyebrow, sectionLabel, description]
    try:
        client.insert('test.cnbc_table', [values], column_names=columns)
    except Exception as e:
        print(f"保存数据时出错: {e}")
        return False
    print(f'数据"{title[:30]}..."已保存 (发布于 {dt_obj})')
    return True


def request_with_retry(url, max_retries=3, timeout=10, headers=None, params=None):
    """GET *url*, retrying up to *max_retries* times on any requests error.

    Sleeps 2 seconds between attempts; the last failure is re-raised to
    the caller instead of being swallowed.
    """
    last_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            resp = requests.get(url, headers=headers, params=params, timeout=timeout)
            resp.raise_for_status()
        except requests.exceptions.RequestException as e:
            print(f"请求失败（第 {attempt + 1} 次重试）: {str(e)}")
            if attempt == last_attempt:
                # Out of attempts — propagate the final error.
                raise
            time.sleep(2)
        else:
            return resp


def get_content(data_url, date_time, data_type, eyebrow, sectionLabel, description):
    """Fetch one article page, extract title/body, and persist via save_data.

    Returns save_data's result (True on new insert, False on duplicate) or
    False on any fetch/parse failure. NOTE(review): the caller cannot tell a
    duplicate apart from a parse error — both come back as False.
    """
    ua_headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
    }

    try:
        html_str = request_with_retry(data_url, headers=ua_headers).content.decode()
    except requests.exceptions.RequestException as e:
        print(f"请求内容时出错: {e}")
        return False

    try:
        root = etree.HTML(html_str)
        title = "".join(root.xpath("//h1/text()")).strip()
        if not title:
            print(f"未找到标题，跳过此条数据: {data_url}")
            return False

        # Article body: text of <p> tags inside each div.group, one paragraph
        # per block, blank ones dropped.
        paragraphs = [
            "".join(node.xpath("./p//text()")).strip()
            for node in root.xpath("//div[@class='group']")
        ]
        main_content = "\n".join(p for p in paragraphs if p)

        if not main_content:
            print(f"未找到主要内容，跳过此条数据: {title[:30]}...")
            return False

        return save_data(title, date_time, main_content, data_url,
                         data_type, eyebrow, sectionLabel, description)
    except Exception as e:
        print(f"处理内容时出错: {e}")
        return False


def get_data(offset=0, page_size=24):
    """Scrape one page of articles for every CNBC section id.

    For each id, queries CNBC's GraphQL endpoint for a page of assets and
    hands each article URL to get_content(). Stops a section early as soon
    as get_content() returns False (typically an already-stored article,
    since feeds are newest-first — but also any fetch/parse failure).

    :param offset: pagination offset passed to the GraphQL query.
    :param page_size: number of assets requested per section.
    """
    # CNBC section/list ids to scrape. (The raw list contains a duplicate,
    # 10000110 — deduplicated below before iterating.)
    numbers = [
        105200300, 100003241, 20910258, 10000664, 10000108, 10000110,
        10000115, 19836768, 106915556, 10000142, 10000098, 10000116,
        21324812, 104199263, 100646281, 104338521, 15839135, 20398120,
        100807029, 108095384, 101004656, 10000026, 10000110, 10000066,
        100397778, 102602634, 104368876, 10001079, 105056552, 10001059,
        108089932, 105229267, 105229289, 105229295, 105229305, 107085830,
        106983828, 107161393, 107200255, 107117219, 102138233, 107229440
    ]

    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36"
    }
    url = "https://webql-redesign.cnbcfm.com/graphql"

    # dict.fromkeys() removes duplicate ids while preserving order, so a
    # repeated id is not scraped (and rate-limited against) twice per run.
    for number in dict.fromkeys(numbers):
        print(f'正在抓取id编号为: {number}的数据')

        params = {
            "variables": json.dumps({
                "id": str(number),
                "offset": offset,
                "pageSize": page_size,
                "nonFilter": True,
                "includeNative": False,
                "include": []
            }),
            "extensions": json.dumps({
                # Persisted-query hash identifying the GraphQL operation.
                "persistedQuery": {
                    "version": 1,
                    "sha256Hash": "43ed5bcff58371b2637d1f860e593e2b56295195169a5e46209ba0abb85288b7"
                }
            })
        }

        try:
            response = request_with_retry(url, headers=headers, params=params)
            initial_data = json.loads(response.content.decode())

            eyebrow = initial_data["data"]["assetList"].get('eyebrow', "无")
            data_list = initial_data["data"]["assetList"].get("assets", [])

            for data in data_list:
                data_url = data.get("url", "")
                # Skip non-article entries without an absolute URL.
                if not data_url.startswith('http'):
                    continue

                date_time = data.get('datePublished', "")
                data_type = data.get('type', "")
                sectionLabel = data.get('section', {}).get('sectionLabel', "无")
                description = data.get('description', "")

                # 如果内容已存在，跳过当前number的剩余数据
                if get_content(data_url, date_time, data_type, eyebrow, sectionLabel, description) is False:
                    print(f"检测到重复内容，跳过number {number}的剩余数据")
                    break

                time.sleep(1)  # 适当减慢请求速度

        except Exception as e:
            # Keep going with the next section id on any per-section failure.
            print(f"请求或处理数据时出错: {e}")
            continue


def job():
    """Run one full scrape cycle: ensure the table exists, then fetch page one.

    This is both the startup entry point and the daily scheduled callback.
    """
    print(f"{datetime.now()}: 开始抓取数据...")
    check_and_create_table()
    # Only the first page of each section id is scraped per run.
    get_data(offset=0, page_size=24)
    print(f"{datetime.now()}: 数据抓取完成")


if __name__ == '__main__':
    # Run one scrape immediately on startup.
    job()

    # Then schedule a daily run at 10:00 (local time of the host).
    schedule.every().day.at("10:00").do(job)

    while True:
        schedule.run_pending()
        time.sleep(60)  # poll the scheduler once per minute