import requests
import json
import time
import random
from requests.adapters import HTTPAdapter
from datetime import datetime
import pytz
from pytz import utc
import clickhouse_connect
# 定时任务
from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

# Timezone used to interpret the feed's timestamp strings (US Eastern).
USE = pytz.timezone("US/Eastern")

# ClickHouse connection.
# NOTE(review): host/port are hard-coded; consider moving to config/env.
client = clickhouse_connect.get_client(host='192.168.31.54', port=8123)

# Target database and table for the crawled feed items.
database = 'test'
table = 'sina_table'


def create_table_if_not_exists():
    """Ensure the destination table exists in ClickHouse.

    Creates ``test.sina_table`` (MergeTree, keyed and ordered by ``id``)
    if it is not already present; idempotent via IF NOT EXISTS.
    """
    create_table_query = """
    CREATE TABLE IF NOT EXISTS test.sina_table (
        id UInt64,
        create_time DateTime,
        rich_text String,
        tag String,
        PRIMARY KEY (id)
    ) ENGINE = MergeTree()
    ORDER BY (id)
    """
    client.command(create_table_query)


def save_to_clickhouse(id, create_time, rich_text, tag):
    """Insert a single feed item into ``test.sina_table``.

    When ``create_time`` arrives as a string it is parsed as US/Eastern
    local time, converted to UTC, and stored as a naive datetime; a
    datetime object is passed through unchanged.
    """
    if isinstance(create_time, str):
        # Parse, attach the Eastern timezone, convert to UTC, drop tzinfo.
        parsed = datetime.strptime(create_time, "%Y-%m-%d %H:%M:%S")
        create_time = USE.localize(parsed).astimezone(utc).replace(tzinfo=None)

    client.insert(
        'test.sina_table',
        [[id, create_time, rich_text, tag]],
        column_names=['id', 'create_time', 'rich_text', 'tag'],
    )


def get_json_data(base_url, headers, retries=3):
    """Fetch one page of the Sina live feed and return its item list.

    The endpoint returns JSONP (callback ``jQuery0``); the wrapper is
    stripped by fixed offsets (12 leading / 14 trailing characters,
    presumably "try{jQuery0(" ... ");}catch(e){};" — confirm against a
    live response) before JSON parsing.

    Args:
        base_url: Full feed URL including page and zhibo_id parameters.
        headers: HTTP headers (User-Agent) to send with the request.
        retries: Attempt count for the manual retry loop, also used for
            the transport-level HTTPAdapter retries.

    Returns:
        list: Parsed feed items, or ``[]`` when every attempt failed.
    """
    session = requests.Session()
    # BUG FIX: the original mounted retrying adapters on the session but
    # then called requests.get(), which bypasses the session entirely —
    # the adapters never applied. Requests must go through the session.
    session.mount('http://', HTTPAdapter(max_retries=retries))
    session.mount('https://', HTTPAdapter(max_retries=retries))
    print("当前本地时间:", time.strftime(r'%Y-%m-%d %H:%M:%S'))  # 打印当前本地时间

    try:
        while retries > 0:
            try:
                response = session.get(base_url, timeout=5, headers=headers)
                response.raise_for_status()
                html_json = json.loads(response.text[12:-14])
                return html_json['result']['data']['feed']['list']
            except Exception as e:
                print('获取数据时发生错误:', e)
                retries -= 1
                time.sleep(5)
    finally:
        # Release the session's pooled connections even on early return.
        session.close()

    print('所有重试均失败，停止请求。')
    return []


def main():
    """Full crawl: walk the feed from page 1 until an empty page arrives,
    inserting every item not yet seen during this run into ClickHouse.
    """
    create_table_if_not_exists()
    seen_ids = set()
    page = 1
    # Headers are loop-invariant; build them once.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36"
    }

    while True:
        try:
            print(f"请求第 {page} 页数据")
            url = (
                "http://zhibo.sina.com.cn/api/zhibo/feed"
                f"?callback=jQuery0&page={page}&zhibo_id=152"
            )

            items = get_json_data(url, headers)
            if not items:
                print('没有获取到数据，结束请求。')
                break

            for item in items:
                try:
                    item_id = int(item['id'])
                    created = item['create_time']
                    text = item['rich_text']
                    label = str(item['tag'])

                    # Skip anything already written during this run.
                    if item_id in seen_ids:
                        continue

                    print(f"ID: {item_id}, Create Time (原始数据): {created}, Rich Text: {text}, Tag: {label}")
                    save_to_clickhouse(item_id, created, text, label)
                    seen_ids.add(item_id)
                except Exception as e:
                    print(f"处理数据时出错: {e}")

            page += 1
            # Small random pause between pages to avoid hammering the API.
            time.sleep(random.randint(1, 3))
        except Exception as e:
            print('发生错误:', e)
            time.sleep(5)


# --- Incremental-update helpers ---

def get_latest_id(table='sina_table'):
    """Return the largest ``id`` stored in ``{database}.{table}``.

    Returns None when the query fails (e.g. connection error). Note that
    ClickHouse's max() over an empty table yields 0, not NULL.
    """
    query = f'''
    SELECT max(id) as latest_id
    FROM {database}.{table}
    '''
    try:
        rows = client.query(query).result_rows
        return rows[0][0] if rows else None
    except Exception as e:
        print(f"查询表 {table} 的最新日期失败: {str(e)}")
        return None


def update_data():
    """Incremental update: insert new feed items until the first ID that
    already exists in ClickHouse (or a repeated ID in this run) appears,
    then stop immediately.
    """
    create_table_if_not_exists()
    latest_id = get_latest_id('sina_table')
    seen_ids = set()
    page = 1
    # Headers are loop-invariant; build them once.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36"
    }

    while True:
        try:
            print(f"请求第 {page} 页数据")
            url = (
                "http://zhibo.sina.com.cn/api/zhibo/feed"
                f"?callback=jQuery0&page={page}&zhibo_id=152"
            )

            items = get_json_data(url, headers)
            if not items:
                print('没有获取到数据，结束请求。')
                break

            for item in items:
                try:
                    item_id = int(item['id'])
                    created = item['create_time']
                    text = item['rich_text']
                    label = str(item['tag'])

                    # IDs at or below the stored maximum already exist.
                    # (latest_id of 0/None means the table was empty.)
                    if latest_id and item_id <= latest_id:
                        print(f"遇到已存在的ID {item_id}，停止更新")
                        return

                    if item_id in seen_ids:
                        print(f"遇到重复ID {item_id}，停止更新")
                        return

                    print(f"新数据 ID: {item_id}, Create Time (原始数据): {created}, Rich Text: {text}, Tag: {label}")
                    save_to_clickhouse(item_id, created, text, label)
                    seen_ids.add(item_id)
                except Exception as e:
                    print(f"处理数据时出错: {e}")

            page += 1
            # Small random pause between pages to avoid hammering the API.
            time.sleep(random.randint(1, 3))
        except Exception as e:
            print('发生错误:', e)
            time.sleep(5)


if __name__ == "__main__":
    # Run one incremental update immediately at startup...
    update_data()
    # ...then keep re-running it every five minutes (blocks forever).
    scheduler = BlockingScheduler()
    scheduler.add_job(update_data, 'interval', minutes=5)
    scheduler.start()
