import datetime
import json
from urllib.parse import urlencode

import scrapy

from web_crawler.config import get_cookies, get_headers
from web_crawler.items import FundHoldingItem
from web_crawler.utils import MySQLPool, DateProcess


class XueqiuFundHoldingSpider(scrapy.Spider):
    """Crawl institutional/fund holding data for A-share stocks from Xueqiu.

    Symbols are loaded from MySQL; one request is issued per
    (symbol, report_date) pair covering the past ``BACK_YEAR`` year(s).
    Parsed rows are emitted as ``FundHoldingItem`` and persisted by
    ``FundholdingPipeline``.
    """

    name = 'xueqiu_fundholding'
    custom_settings = {
        'CONCURRENT_REQUESTS': 64,
        'DOWNLOAD_DELAY': 0.05,
        'COOKIES_ENABLED': True,
        'RETRY_TIMES': 3,
        # Also retry client-side codes: Xueqiu intermittently answers
        # 400/403/404/408 for valid requests (rate limiting / stale cookies).
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 400, 403, 404, 408],
        'ITEM_PIPELINES': {
            'web_crawler.pipeline.fundholding.FundholdingPipeline': 300,
        }
    }
    BACK_YEAR = 1  # how many years of quarterly reports to look back

    # SQL used to load the distinct symbol list (SH/SZ/BJ exchanges only).
    SYMBOL_SQL = """
       SELECT distinct symbol FROM xueqiu_stocks WHERE SUBSTR(symbol, 1, 2) IN ('SH', 'SZ', 'BJ');
       """

    def __init__(self, *args, **kwargs):
        # Forward *args/**kwargs so `scrapy crawl -a name=value` spider
        # arguments keep working (the original swallowed them).
        super().__init__(*args, **kwargs)
        self.pool = MySQLPool()
        self.symbols = self._fetch_symbols_from_db()  # stock symbol list

    def _fetch_symbols_from_db(self):
        """Return the distinct stock symbols stored in MySQL.

        Returns:
            list[str]: symbols as stored, e.g. 'SH600000' / 'SZ000001'.
        """
        conn = self.pool.get_conn()
        try:
            # Cursor rows are indexed by column name (dict-style cursor) —
            # presumably DictCursor; verify against MySQLPool config.
            with conn.cursor() as cursor:
                cursor.execute(self.SYMBOL_SQL)
                return [row['symbol'] for row in cursor.fetchall()]
        finally:
            conn.close()  # always release the pooled connection

    def start_requests(self):
        """Yield one holdings request per (symbol, report_date) pair."""
        report_dates = DateProcess.get_past_report_dates(self.BACK_YEAR)
        for symbol in self.symbols:
            for report_date in report_dates:
                params = {
                    'symbol': symbol,
                    'timestamp': DateProcess.str_date_to_timestamp(report_date),
                    'extend': 'true'
                }
                yield scrapy.Request(
                    url=f"https://stock.xueqiu.com/v5/stock/f10/cn/org_holding/detail.json?{urlencode(params)}",
                    callback=self.parse_fund_holding,
                    cookies=get_cookies(),
                    headers=get_headers(),
                    # Carry request context into the callback for item fields.
                    meta={'symbol': symbol, 'report_date': report_date},
                )

    def parse_fund_holding(self, response):
        """Parse a holdings response and yield one item per fund.

        Skips the aggregate '全部合计' (grand total) row and responses
        that carry no holding data.
        """
        meta = response.meta
        data = json.loads(response.text)
        # Error payloads may return "data": null (or omit it entirely);
        # the original `data['data'].get(...)` crashed on those — guard both.
        fund_items = (data.get('data') or {}).get('fund_items') or []
        if not fund_items:
            self.logger.debug(f"股票 {meta['symbol']} 在 {meta['report_date']} 无基金持股数据")
            return  # explicitly yield nothing for empty reports

        for item_data in fund_items:
            org_name = item_data.get('org_name_or_fund_name')
            # Drop the grand-total summary row; it is not a real fund.
            if org_name == '全部合计':
                continue
            item = FundHoldingItem()
            item['org_name_or_fund_name'] = org_name
            item['held_num'] = item_data.get('held_num')
            item['to_float_shares_ratio'] = item_data.get('to_float_shares_ratio')
            item['symbol'] = meta['symbol']
            item['report_date'] = meta['report_date']
            yield item
