import json
import re
import time
from datetime import datetime

import requests
from lxml import etree
import clickhouse_connect
from urllib3.exceptions import InsecureRequestWarning
import sys
from apscheduler.schedulers.blocking import BlockingScheduler
import pandas as pd
import pyarrow as pa
from tqdm.auto import tqdm
from concurrent.futures import ThreadPoolExecutor, as_completed

# Disable SSL certificate warnings (all requests below use verify=False).
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# ClickHouse connection configuration.
# NOTE(review): connects at import time to a hard-coded LAN host — a connection
# failure here aborts the whole script before any function runs; confirm intended.
client = clickhouse_connect.get_client(host='192.168.31.54', port=8123)
database = 'test'
table = 'reuters_table'

# Browser-like default headers sent with every Reuters request
# (UA spoofing to avoid trivial bot blocking).
headers = {
    "accept-language": "zh-CN,zh;q=0.9",
    "referer": "https://www.reuters.com/business/",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36"
}


class ProxyManager:
    """Caches the current outbound proxy and rotates it on demand."""

    def __init__(self):
        # Fetch an initial proxy at construction time (may be None on failure).
        self.current_proxy = self._get_new_proxy()

    def _get_new_proxy(self):
        """Request a fresh US HTTP proxy from the ipwo API.

        Returns:
            A requests-style proxies dict covering both http and https
            traffic, or None if the API call failed or returned no address.
        """
        try:
            proxy = requests.get(
                'https://www.ipwo.net/api/proxy/get_proxy_ip?num=1&regions=US&protocol=http&return_type=txt&lb=1&sb=1',
                timeout=10
            ).text.strip()
            if not proxy:
                # An empty body would otherwise yield a broken 'http://' proxy URL.
                return None
            # Route both schemes through the proxy. The original dict only had
            # an 'https' key, so any plain-http request went out unproxied.
            return {'http': f'http://{proxy}', 'https': f'http://{proxy}'}
        except Exception as e:
            print(f"获取代理失败: {str(e)}")
            return None

    def get_proxy(self):
        """Return the currently cached proxies dict (possibly None)."""
        return self.current_proxy

    def renew_proxy(self):
        """Fetch a new proxy, replace the cached one, and return it."""
        self.current_proxy = self._get_new_proxy()
        return self.current_proxy


def check_and_create_table():
    """Ensure the news analysis table exists in ClickHouse (idempotent DDL)."""
    ddl = f"""
    CREATE TABLE IF NOT EXISTS {database}.{table} (
        id String,
        title String,
        published_time DateTime,
        updated_time DateTime,
        canonical_url String,
        main_content String,
        article_type String,
        company_rics String,
        ad_topics String,
        description String
    ) ENGINE = MergeTree()
    ORDER BY (published_time)
    """
    client.command(ddl)
    print(f"新闻分析表 {table} 已创建或检查完毕。")


def get_existing_ids():
    """Return the set of article ids already stored in ClickHouse.

    Returns an empty set when the query fails, so callers can proceed
    as if nothing were stored yet.
    """
    try:
        rows = client.query(f"SELECT id FROM {database}.{table}").result_rows
    except Exception as e:
        print(f"查询现有ID失败: {str(e)}")
        return set()
    return {record[0] for record in rows}


def request_with_proxy_fallback(url, proxy_manager, max_retries=3, timeout=10, headers=None, params=None):
    """GET *url* through the current proxy, rotating proxies and retrying on failure.

    Args:
        url: Target URL.
        proxy_manager: ProxyManager supplying and rotating proxies.
        max_retries: Total attempts before giving up.
        timeout: Per-request timeout in seconds.
        headers: Optional headers passed straight to requests.get.
        params: Optional query parameters passed straight to requests.get.

    Returns:
        The successful requests.Response (SSL verification is disabled).

    Raises:
        requests.exceptions.HTTPError: non-403 HTTP errors immediately,
            or a 403 on the final attempt.
        requests.exceptions.RequestException: network errors on the final attempt.
        RuntimeError: when no proxy could be obtained within max_retries
            attempts. (The original code fell off the loop and returned
            None here, crashing callers with AttributeError.)
    """
    for attempt in range(max_retries):
        proxies = proxy_manager.get_proxy()
        if not proxies:
            print("无可用代理，尝试获取新代理...")
            proxies = proxy_manager.renew_proxy()
            if not proxies:
                print("无法获取代理IP，等待后重试...")
                time.sleep(5)
                continue

        try:
            response = requests.get(
                url,
                headers=headers,
                params=params,
                proxies=proxies,
                timeout=timeout,
                verify=False
            )
            response.raise_for_status()
            return response
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 403:
                # 403 means the proxy IP is blocked: rotate and retry.
                print(f"403禁止访问错误，更换代理重试...")
                proxy_manager.renew_proxy()
                if attempt == max_retries - 1:
                    raise
            else:
                print(f"HTTP错误({e.response.status_code})，不再重试: {str(e)}")
                raise
            time.sleep(2)
        except requests.exceptions.RequestException as e:
            print(f"请求失败（第 {attempt + 1} 次重试）: {str(e)}")
            if attempt == max_retries - 1:
                raise
            time.sleep(2)
    # All attempts consumed without ever obtaining a proxy: fail loudly
    # instead of implicitly returning None.
    raise RuntimeError(f"无法完成请求（已重试 {max_retries} 次）: {url}")


def get_article_content(url, proxy_manager):
    """Fetch an article page and return its plain-text body, or "" on failure.

    Strips the trailing "For more insights like..." promo blurb.
    """
    try:
        page = request_with_proxy_fallback(url, proxy_manager, headers=headers)
        tree = etree.HTML(page.content.decode())
        body = "".join(tree.xpath("//div[@class='article-body__content__17Yit']//text()"))
        return re.sub(r'For more insights like.*', '', body).strip()
    except Exception as e:
        print(f"获取文章内容失败: {str(e)}")
        return ""


def batch_insert_articles(articles_batch):
    """Batch-insert article dicts into ClickHouse, skipping ids that already exist.

    Best-effort cascade (mirrors the original structure):
      1. Arrow-based bulk insert of the rows not yet stored.
      2. On failure, fall back to client.insert with row tuples.
      3. On failure again, insert rows one by one, counting successes.

    Args:
        articles_batch: List of dicts with the table's ten column keys.
    """
    if not articles_batch:
        return

    columns = ['id', 'title', 'published_time', 'updated_time', 'canonical_url',
               'main_content', 'article_type', 'company_rics', 'ad_topics', 'description']

    try:
        batch_ids = list({article['id'] for article in articles_batch})

        # Server-side bound parameter instead of string-building the IN list:
        # the original f-string was injectable and broke on quotes inside ids.
        existing_rows = client.query(
            f"SELECT id FROM {database}.{table} WHERE id IN {{ids:Array(String)}}",
            parameters={'ids': batch_ids}
        ).result_rows
        existing_ids = {row[0] for row in existing_rows}

        # Drop articles already present in the table.
        filtered_articles = [a for a in articles_batch if a['id'] not in existing_ids]
        if not filtered_articles:
            print(f"所有 {len(articles_batch)} 篇文章已存在，跳过插入")
            return

        df = pd.DataFrame(filtered_articles)

        # Parse the time columns from their ISO8601 string form.
        for time_col in ('published_time', 'updated_time'):
            if time_col in df.columns:
                df[time_col] = pd.to_datetime(df[time_col], format='ISO8601')

        client.insert_arrow(table, pa.Table.from_pandas(df), database=database)
        print(
            f"成功批量插入 {len(filtered_articles)} 篇文章（跳过 {len(articles_batch) - len(filtered_articles)} 篇已存在文章）")

    except Exception as e:
        print(f"批量插入失败，错误: {str(e)}")
        try:
            # client.insert is clickhouse-connect's documented bulk API; the
            # original string-built VALUES with %s placeholders is not a
            # supported binding form for client.command.
            rows = [[article[col] for col in columns] for article in articles_batch]
            client.insert(table, rows, column_names=columns, database=database)
            print(f"使用原生客户端成功插入 {len(rows)} 篇文章")
        except Exception as bulk_err:  # renamed: don't shadow the outer exception
            print(f"原生客户端批量插入也失败，错误: {str(bulk_err)}")
            # Last resort: one row at a time so a single bad row can't sink the batch.
            success_count = 0
            for article in articles_batch:
                try:
                    client.insert(table, [[article[col] for col in columns]],
                                  column_names=columns, database=database)
                    success_count += 1
                except Exception as row_err:
                    print(f"插入单篇文章失败: {article['title'][:50]}..., 错误: {str(row_err)}")
            print(f"单条插入完成，成功插入 {success_count} 篇文章")


def fetch_articles(api_url, params, proxy_manager):
    """Return the article list from the Reuters section API, or None on any error."""
    try:
        payload = request_with_proxy_fallback(
            api_url, proxy_manager, headers=headers, params=params
        ).json()
        return payload['result']['articles']
    except Exception as e:
        print(f"获取文章列表失败: {str(e)}")
        return None


def process_article_batch(articles, proxy_manager, existing_ids=None):
    """Enrich raw article metadata with full body text, fetched in parallel.

    Articles whose id appears in *existing_ids* (when given and non-empty)
    are skipped. Returns a list of dicts ready for insertion.
    """
    base_url = 'https://www.reuters.com'

    def build_record(raw):
        # Returns a complete article dict, or None for skipped/failed items.
        try:
            if existing_ids and raw['id'] in existing_ids:
                return None

            record = {
                'id': raw['id'],
                'title': raw['title'],
                'published_time': raw['published_time'],
                'updated_time': raw['updated_time'],
                'canonical_url': raw['canonical_url'],
                'article_type': raw['article_type'],
                'company_rics': raw.get('company_rics', ''),
                'ad_topics': raw['ad_topics'],
                'description': raw['description']
            }
            record['main_content'] = get_article_content(
                base_url + raw['canonical_url'], proxy_manager)
            return record
        except Exception as e:
            print(f"处理文章时出错: {str(e)}")
            return None

    # Fetch article bodies concurrently; collect results as they finish.
    processed = []
    with ThreadPoolExecutor(max_workers=5) as pool:
        pending = [pool.submit(build_record, item) for item in articles]
        for done in as_completed(pending):
            record = done.result()
            if record:
                processed.append(record)

    return processed


def get_articles(section_list):
    """Crawl every section in *section_list* exhaustively, paging by offset.

    For each section, pages of 20 articles are fetched until the API returns
    three consecutive empty lists (or a fatal error occurs), inserting each
    processed batch into ClickHouse as it completes.
    """
    proxy_manager = ProxyManager()
    check_and_create_table()
    base_url = 'https://www.reuters.com'
    api_url = "https://www.reuters.com/pf/api/v3/content/fetch/articles-by-section-alias-or-id-v1"

    for section_id in section_list:
        print(f"开始处理section: {section_id}")
        offset, batch_size = 0, 20
        max_empty_responses, empty_response_count = 3, 0

        while True:
            query_payload = {
                "arc-site": "reuters",
                "fetch_type": "collection",
                "offset": offset,
                "section_id": section_id,
                "size": batch_size,
                "website": "reuters"
            }

            try:
                articles_list = fetch_articles(
                    api_url, {"query": json.dumps(query_payload)}, proxy_manager)
                if articles_list is None:
                    print("获取文章列表时发生致命错误，跳过当前section")
                    break

                if not articles_list:
                    # Empty page: back off, and give up on the section after
                    # max_empty_responses consecutive empties.
                    empty_response_count += 1
                    print(f"获取到空文章列表，当前空响应计数: {empty_response_count}/{max_empty_responses}")
                    if empty_response_count >= max_empty_responses:
                        print("连续多次获取到空响应，可能已无更多数据，处理下一个section")
                        break
                    time.sleep(10)
                    continue

                empty_response_count = 0
                batch = process_article_batch(articles_list, proxy_manager)
                if batch:
                    batch_insert_articles(batch)

                offset += batch_size
                print(f"已处理 {batch_size} 篇文章，当前offset: {offset}")
                time.sleep(2)  # pause between pages

            except KeyboardInterrupt:
                print("用户中断，退出程序")
                sys.exit(0)
            except Exception as e:
                print(f"主循环发生未处理异常: {str(e)}")
                break


def update_articles(section_list):
    """Incremental update: fetch the newest page of each section, insert only unseen articles.

    The original had its docstring as a dead bare-string statement *after*
    the first print; it is now a real docstring. The unused `base_url`
    local has been removed.

    Args:
        section_list: Iterable of Reuters section path ids (e.g. '/world/china/').
    """
    print(f"{datetime.now()}: 开始更新文章数据...")
    proxy_manager = ProxyManager()
    existing_ids = get_existing_ids()

    api_url = "https://www.reuters.com/pf/api/v3/content/fetch/articles-by-section-alias-or-id-v1"
    for section_id in section_list:
        print(f"开始处理section: {section_id}")
        params = {
            "query": json.dumps({
                "arc-site": "reuters",
                "fetch_type": "collection",
                "offset": 0,
                "section_id": section_id,
                "size": 20,
                "website": "reuters"
            })
        }

        try:
            articles = fetch_articles(api_url, params, proxy_manager)
            if not articles:
                print("未获取到新文章数据")
                continue
            processed_articles = process_article_batch(articles, proxy_manager, existing_ids)
            if processed_articles:
                batch_insert_articles(processed_articles)
                print(f"更新完成，新增 {len(processed_articles)} 篇文章")
            else:
                print("没有需要更新的新文章")

        except Exception as e:
            print(f"更新过程中发生错误: {str(e)}")


if __name__ == '__main__':
    # Reuters section paths to crawl (world / business / markets /
    # sustainability / legal / breakingviews / technology verticals).
    section_list = ['/world/africa/', '/world/americas/', '/world/asia-pacific/', '/world/china/', '/world/europe/',
                    '/world/india/', '/world/israel-hamas/', '/world/japan/', '/world/middle-east/',
                    '/world/ukraine-russia-war/', '/world/uk/', '/world/us/', '/world/reuters-next/',
                    '/business/aerospace-defense/', '/business/autos-transportation/', '/business/davos/',
                    '/business/energy/', '/business/environment/', '/business/finance/',
                    '/business/healthcare-pharmaceuticals/', '/business/media-telecom/', '/business/retail-consumer/',
                    '/business/future-of-health/', '/business/future-of-money/', '/business/take-five/',
                    '/business/world-at-work/', '/markets/asia/', '/markets/carbon/', '/markets/commodities/',
                    '/markets/currencies/', '/markets/deals/', '/markets/emerging/', '/markets/etf/',
                    '/markets/europe/', '/markets/funds/', '/markets/global-market-data/', '/markets/rates-bonds/',
                    '/markets/stocks/', '/markets/us/', '/markets/wealth/', '/markets/macromatters/',
                    '/sustainability/boards-policy-regulation/', '/sustainability/climate-energy/',
                    '/sustainability/land-use-biodiversity/', '/sustainability/society-equity/',
                    '/sustainability/sustainable-finance-reporting/', '/sustainability/the-switch/',
                    '/sustainability/reuters-impact/', '/sustainability/cop/', '/legal/government/',
                    '/legal/legalindustry/', '/legal/litigation/', '/legal/transactional/', '/legal/us-supreme-court/',
                    '/breakingviews/predictions/', '/technology/artificial-intelligence/', '/technology/cybersecurity/',
                    '/technology/space/', '/technology/disrupted/']

    # Run one update immediately at startup.
    update_articles(section_list)

    # Then schedule a daily run at 09:30.
    scheduler = BlockingScheduler()
    # Pass the function reference plus args — not the result of calling it.
    scheduler.add_job(update_articles, 'cron', args=[section_list], hour=9, minute=30)
    # NOTE: start() blocks the process forever (BlockingScheduler).
    scheduler.start()
