import time
import requests
from lxml import etree
import json
import clickhouse_connect
from datetime import datetime
from dateutil.parser import parse  # 用于解析各种日期时间格式

# ClickHouse connection shared by every function below.
# NOTE(review): host/port (and missing credentials) are hard-coded — consider env vars.
client = clickhouse_connect.get_client(host='192.168.31.54', port=8123)


def check_and_create_table():
    """Create ClickHouse table ``test.cnbc_table`` if it does not exist yet."""
    ddl = """
    CREATE TABLE IF NOT EXISTS test.cnbc_table
    (
        title String,
        date_time DateTime PRIMARY KEY,
        main_content String,
        data_url String,
        data_type String,
        eyebrow String,
        sectionLabel String,
        description String
    )
    ENGINE = MergeTree()
    ORDER BY date_time
    """
    # Idempotent thanks to IF NOT EXISTS, so safe to run at every startup.
    client.command(ddl)
    print("已确认表test.cnbc_table存在")


def parse_datetime(dt_str):
    """Parse an arbitrary date-time string into a ``datetime``.

    Returns None for empty input or when parsing fails (the error is printed).
    """
    if not dt_str:
        return None

    try:
        # '+0000' and 'Z' both denote UTC; normalise to the ISO 'Z' suffix
        # before handing the string to dateutil.
        dt_str = dt_str[:-5] + 'Z' if dt_str.endswith('+0000') else dt_str
        return parse(dt_str)
    except Exception as exc:
        print(f"日期时间解析错误: {exc}, 原始值: {dt_str}")
        return None


def save_data(title, date_time, main_content, data_url, data_type, eyebrow, sectionLabel, description):
    """Insert one article row into ClickHouse table ``test.cnbc_table``.

    ``date_time`` is a raw string; rows whose date cannot be parsed are skipped.
    """
    dt_obj = parse_datetime(date_time)
    if dt_obj is None:
        print(f"无法解析日期时间，跳过此条数据: {date_time}")
        return

    columns = ['title', 'date_time', 'main_content', 'data_url', 'data_type',
               'eyebrow', 'sectionLabel', 'description']
    values = [title, dt_obj, main_content, data_url, data_type, eyebrow, sectionLabel, description]
    try:
        client.insert('test.cnbc_table', [values], column_names=columns)
        # Only the first 30 characters of the title are echoed to keep logs short.
        print(f'数据"{title[:30]}..."已保存到 ClickHouse')
    except Exception as exc:
        print(f"保存数据时出错: {exc}")


# HTTP GET with a simple timeout/retry mechanism.
def request_with_retry(url, max_retries=3, timeout=10, headers=None, params=None):
    """GET *url*, retrying up to *max_retries* times with a 2 s pause between tries.

    Raises the last ``requests.exceptions.RequestException`` when every attempt fails.
    """
    last_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            resp = requests.get(url, headers=headers, params=params, timeout=timeout)
            resp.raise_for_status()
            return resp
        except requests.exceptions.RequestException as exc:
            print(f"请求失败（第 {attempt + 1} 次重试）: {str(exc)}")
            if attempt == last_attempt:
                raise
            time.sleep(2)


def get_content(data_url, date_time, data_type, eyebrow, sectionLabel, description):
    """Fetch one article page, extract title and body, and persist via save_data."""
    ua = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36'
    }

    try:
        page = request_with_retry(data_url, headers=ua).content.decode()
    except requests.exceptions.RequestException as exc:
        print(f"请求内容时出错: {exc}")
        return

    try:
        tree = etree.HTML(page)
        title = "".join(tree.xpath("//h1/text()")).strip()
        if not title:
            print(f"未找到标题，跳过此条数据: {data_url}")
            return

        # Collect the non-empty paragraph text of every content group.
        paragraphs = []
        for group in tree.xpath("//div[@class='group']"):
            text = "".join(group.xpath("./p//text()")).strip()
            if text:
                paragraphs.append(text)

        if paragraphs:
            save_data(title, date_time, "\n".join(paragraphs), data_url,
                      data_type, eyebrow, sectionLabel, description)
        else:
            print(f"未找到主要内容，跳过此条数据: {title}")
    except Exception as exc:
        print(f"处理内容时出错: {exc}")


def read_offset():
    """Return the crawl offset persisted in ``offset.txt`` (0 when unavailable)."""
    try:
        with open('offset.txt', 'r') as fh:
            saved = int(fh.read().strip())
    except FileNotFoundError:
        print("未找到偏移量文件，将从0开始")
        return 0
    except Exception as exc:
        # Any other failure (e.g. non-numeric content) also falls back to 0.
        print(f"读取偏移量时出错: {exc}")
        return 0
    print(f"从偏移量 {saved} 开始爬取")
    return saved


def write_offset(offset):
    """Persist *offset* into ``offset.txt`` so a later run can resume from it."""
    try:
        with open('offset.txt', 'w') as fh:
            fh.write(str(offset))
            print(f"当前偏移量 {offset} 已保存")
    except Exception as exc:
        print(f"保存偏移量时出错: {exc}")


# CNBC section/list ids to crawl, grouped by site category.
# NOTE(review): id 10000110 appears twice (Business and TECH) and will be crawled twice.
numbers = [
    # Markets
    105200300,
    100003241,
    # Business
    20910258,
    10000664,
    10000108,
    10000110,
    10000115,
    19836768,
    106915556,
    10000142,
    10000098,
    10000116,
    # Investing
    21324812,
    104199263,
    100646281,
    104338521,
    15839135,
    20398120,
    # TECH
    100807029,
    108095384,
    101004656,
    10000026,
    10000110,
    10000066,
    100397778,
    102602634,
    104368876,
    # Politics
    10001079,
    105056552,
    10001059,
    108089932,
    105229267,
    105229289,
    105229295,
    105229305,
    # Investing Club
    107085830,
    106983828,
    107161393,
    107200255,
    107117219,
    # Pro
    102138233,
    107229440
]

# Make sure the destination table exists before crawling starts.
check_and_create_table()

for number in numbers:
    # Reset the persisted offset at the start of every list id.
    # NOTE(review): writing 0 here and reading it back two lines later means
    # read_offset() always returns 0, so the resume mechanism never takes
    # effect — presumably the write should happen only on completion, or the
    # read should happen before the reset. Confirm intent before changing.
    write_offset(0)
    time.sleep(2)
    print(f'正在抓取id编号为: {number}的数据')
    offset = read_offset()
    # Page through the list API 24 items at a time, up to offset 168 (< 179).
    for i in range(offset, 179, 24):
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36"
        }
        url = "https://webql-redesign.cnbcfm.com/graphql"
        # GraphQL persisted query: variables carry the list id and paging window.
        params = {
            "variables": "{\"id\":\"" + str(number) + "\",\"offset\":" + str(
                i) + ",\"pageSize\":24,\"nonFilter\":true,\"includeNative\":false,\"include\":[]}",
            "extensions": "{\"persistedQuery\":{\"version\":1,\"sha256Hash\":\"43ed5bcff58371b2637d1f860e593e2b56295195169a5e46209ba0abb85288b7\"}}"
        }

        try:
            response = request_with_retry(url, headers=headers, params=params)
            html_str = response.content.decode()
            Initial_data = json.loads(html_str)

            # Category label of the whole list.
            eyebrow = Initial_data["data"]["assetList"].get('eyebrow', "无")
            # The page of article metadata entries.
            data_list = Initial_data["data"]["assetList"].get("assets", [])
            for data in data_list:
                data_url = data.get("url", "")
                if not data_url.startswith('http'):
                    continue  # skip invalid URLs

                date_time = data.get('datePublished', "")
                data_type = data.get('type', "")
                sectionLabel = data.get('section', {}).get('sectionLabel', "无")
                description = data.get('description', "")
                time.sleep(1)  # throttle requests a little

                # Fetch the article body and store it.
                get_content(data_url, date_time, data_type, eyebrow, sectionLabel, description)

            # Persist the next page offset.
            write_offset(i + 24)

        except Exception as e:
            print(f"请求或处理数据时出错: {e}")