import base64
import json
import requests
from datetime import datetime
from itemadapter import ItemAdapter
from requests.auth import HTTPBasicAuth
from scrapy.exceptions import DropItem
from scrapy.utils.serialize import ScrapyJSONEncoder



class XueqiuStockDorisPipeline:
    """Scrapy pipeline that buffers Xueqiu stock items and bulk-writes
    them to Apache Doris via the HTTP Stream Load API.

    Items are validated and normalised in :meth:`process_item`, cached in
    ``batch_data``, and flushed to Doris once ``batch_size`` rows have
    accumulated. Any remainder is flushed when the spider closes.
    """

    def __init__(self):
        self.logger = None  # bound to the spider's logger in open_spider()
        # Doris Stream Load connection settings.
        self.doris_host = "172.26.245.203"  # Doris FE node address
        self.doris_port = "8030"  # FE HTTP port used by Stream Load
        self.doris_db = "xueqiu_data"  # target database
        self.doris_table = "xueqiu_stocks"  # target table
        self.doris_user = "root"  # Doris username
        self.doris_password = ""  # Doris password
        # Batching configuration.
        self.batch_size = 200  # flush to Doris every 200 buffered rows
        self.batch_data = []  # pending rows awaiting the next flush

    def open_spider(self, spider):
        """Called automatically when the spider starts; adopt its logger."""
        self.logger = spider.logger
        self.logger.info("Doris StreamLoad管道初始化完成")

    def close_spider(self, spider):
        """Flush any rows still buffered when the spider shuts down."""
        if self.batch_data:
            self._stream_load_batch()

    def process_item(self, item, spider):
        """Validate *item*, normalise its fields, and buffer it for Doris.

        Returns the item unchanged so downstream pipelines still see it.

        Raises:
            DropItem: if the item has no ``symbol`` field, or if a batch
                flush triggered by this item fails.
        """
        adapter = ItemAdapter(item)
        # A row without a symbol cannot be keyed in Doris -- drop it.
        if not adapter.get('symbol'):
            raise DropItem("股票数据缺少symbol字段")

        # Fill in defaults for optional fields.
        adapter.setdefault('timestamp', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        adapter.setdefault('market', 'CN')
        adapter.setdefault('page', 1)

        # Numeric fields may arrive as None (e.g. suspended stocks);
        # coerce both to 0.0 so the Doris columns never receive NULL.
        # (The original only normalised 'percent', leaving 'current'
        # able to slip a None through as the .get() default is skipped
        # when the key exists with a None value.)
        percent = adapter.get('percent')
        if percent is None:
            percent = 0.0
        current = adapter.get('current')
        if current is None:
            current = 0.0

        # Build the row matching the xueqiu_stocks table schema.
        data = {
            "symbol": adapter['symbol'],
            "name": adapter.get('name', ""),
            "current": current,
            "percent": percent,
            "page": int(adapter.get('page', 1)),
            "market": adapter.get('market', 'CN'),
            # Use the timestamp established above so the item and the
            # stored row agree; the original recomputed datetime.now()
            # here, silently discarding any timestamp the item carried.
            "timestamp": adapter['timestamp'],
        }
        self.batch_data.append(data)

        # Flush once the buffer reaches the configured batch size.
        if len(self.batch_data) >= self.batch_size:
            self._stream_load_batch()

        return item

    def _stream_load_batch(self):
        """Send all buffered rows to Doris in one Stream Load request.

        On success the buffer is cleared. On failure the buffer is also
        cleared (the batch is discarded) and DropItem is raised so Scrapy
        records the failure.
        """
        if not self.batch_data:
            return

        url = (
            f"http://{self.doris_host}:{self.doris_port}"
            f"/api/{self.doris_db}/{self.doris_table}/_stream_load"
        )
        try:
            response = requests.put(
                url,
                auth=HTTPBasicAuth(self.doris_user, self.doris_password),
                headers={
                    "Content-Type": "application/json",
                    "Expect": "100-continue",
                    "format": "json",
                    # The payload is a single JSON array of row objects.
                    "strip_outer_array": "true",
                    # NOTE(review): in Doris, merge_type MERGE normally
                    # requires an accompanying "delete" condition header;
                    # confirm APPEND isn't the intended mode here.
                    "merge_type": "MERGE",
                },
                data=json.dumps(self.batch_data),
                # Fail fast instead of blocking the spider forever if the
                # FE node is unreachable: (connect, read) in seconds.
                timeout=(10, 60),
            )
            # Transport-level failure.
            if response.status_code != 200:
                raise Exception(f"请求失败，状态码: {response.status_code}")

            # Stream Load reports application-level status in the JSON body.
            result = response.json()
            if result.get("Status") != "Success":
                raise Exception(f"StreamLoad处理失败: {result.get('Message')}")

            self.logger.info(f"成功写入{len(self.batch_data)}条数据到Doris")
            self.batch_data = []  # clear only after confirmed success

        except Exception as e:
            self.logger.error(f"写入Doris失败: {str(e)}")
            # The failed batch is discarded; keep it instead if retry
            # semantics are preferred.
            self.batch_data = []
            raise DropItem(f"Doris写入失败: {str(e)}")






