# spiders/stock_spider.py
import scrapy
import json
import time
from urllib.parse import urlencode
from ..items import StockItem
from ..utils import get_calback, parse_jsonp
import logging

logger = logging.getLogger(__name__)


class StockSpider(scrapy.Spider):
    """Crawl per-stock margin-trading detail rows (report RPTA_WEB_RZRQ_GGMX)
    from the Eastmoney data-center JSONP API.

    Fetches page 1 for the configured stock code, records the total page
    count, yields one :class:`StockItem` per data row, and follows
    pagination until all pages are consumed.
    """

    name = 'stock_spider'
    allowed_domains = ['datacenter-web.eastmoney.com']

    def __init__(self, code=None, *args, **kwargs):
        """
        :param code: stock code to query; interpolated into the API
            ``filter`` parameter as ``(scode="<code>")``.
        """
        super().__init__(*args, **kwargs)
        self.code = code
        # BUG FIX: the original assigned the stock's display name to
        # ``self.name``, which clobbered the Scrapy spider name declared
        # above ('stock_spider') — Scrapy relies on ``spider.name`` for
        # crawler registration and logging. Keep it in its own attribute.
        self.stock_name = None  # stock display name, filled from page 1
        self.total_pages = 1    # total page count, updated from page 1

    def start_requests(self):
        """Issue the request for page 1 of the margin-trading data."""
        callback = get_calback()
        base_url = "https://datacenter-web.eastmoney.com/api/data/v1/get"
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
        }
        params = {
            'callback': callback,
            'reportName': 'RPTA_WEB_RZRQ_GGMX',
            'columns': 'ALL',
            'source': 'WEB',
            'sortColumns': 'date',
            'sortTypes': -1,
            'pageNumber': 1,
            'pageSize': 50,
            'filter': f'(scode="{self.code}")',
            '_': int(time.time())  # cache-busting timestamp
        }

        yield scrapy.Request(
            url=f"{base_url}?{urlencode(params)}",
            headers=headers,
            callback=self.parse,
            # params travel in meta so parse() can build the next-page URL
            meta={'params': params},
        )

    @staticmethod
    def _str_or_none(value):
        """str(value), or the literal string 'None' when value is None
        (preserves the original item formatting)."""
        return str(value) if value is not None else 'None'

    @staticmethod
    def _pct_or_none(value):
        """'<value>%', or the literal string 'None' when value is None."""
        return f"{value}%" if value is not None else 'None'

    def _build_item(self, entry):
        """Map one API row (dict) to a StockItem.

        NOTE(review): several field names look mismatched with the API
        columns they carry (e.g. ``price_change_amount`` holds
        RZMRE - RZCHE, a margin buy-minus-repay figure) — kept as-is to
        preserve downstream behavior; confirm against the pipeline.
        """
        rzmre = entry.get('RZMRE')
        rzche = entry.get('RZCHE')
        rzrqye = entry.get('RZRQYE')
        return StockItem(
            code=entry.get('SCODE'),
            name=entry.get('SECNAME'),
            related_links='股吧 资金流 数据',
            latest_price=self._str_or_none(entry.get('SPJ')),
            price_change_percentage=self._pct_or_none(entry.get('ZDF')),
            price_change_amount=(rzmre - rzche) if rzmre is not None and rzche is not None else 'None',
            volume_hands=self._str_or_none(entry.get('RQMCL')),
            turnover_amount=f"{rzrqye / 10000:.2f}亿" if rzrqye is not None else 'None',
            amplitude=self._pct_or_none(entry.get('RCHANGE10DCP')),
            highest=self._str_or_none(entry.get('high')),
            lowest=self._str_or_none(entry.get('low')),
            opening_price=self._str_or_none(entry.get('open_price')),
            previous_close_price=self._str_or_none(entry.get('previous_close')),
            turnover_rate=self._pct_or_none(entry.get('FIN_BALANCE_GR')),
            dynamic_pe_ratio=self._str_or_none(entry.get('pe_ratio_dynamic')),
            pb_ratio=self._str_or_none(entry.get('pb_ratio')),
        )

    def parse(self, response):
        """Parse one JSONP page: yield items and request the next page.

        Any per-page failure is logged (with traceback) instead of killing
        the crawl, so other in-flight requests continue.
        """
        try:
            json_response = parse_jsonp(response.text)
            if not json_response or 'result' not in json_response:
                logger.error("Invalid JSON response received")
                return

            result = json_response['result']
            if result is None:
                # API returned an empty result set for this code
                logger.info("数据为空")
                return

            pages = result.get('pages', 1)
            # guard: the API may return "data": null on an empty page
            entries = result.get('data', []) or []
            current_page = response.meta['params']['pageNumber']

            # On the first page, capture the stock name and total pages.
            if current_page == 1:
                first = entries[0] if entries else None
                self.stock_name = first.get('SECNAME') if first else 'Unknown'
                self.total_pages = pages
                logger.info(f"开始采集 {self.stock_name} 数据，共 {self.total_pages} 页")

            for idx, entry in enumerate(entries, start=1):
                if not isinstance(entry, dict):
                    logger.warning(f"Entry at index {idx} is not a dictionary, skipping.")
                    continue
                yield self._build_item(entry)

            logger.info(f"已完成 {self.stock_name} 第 {current_page} 页数据的采集")

            if current_page < pages:
                params = response.meta['params'].copy()
                params['pageNumber'] = current_page + 1
                yield scrapy.Request(
                    url=f"{response.url.split('?')[0]}?{urlencode(params)}",
                    headers=response.request.headers,
                    callback=self.parse,
                    meta={'params': params},
                )
            elif current_page == pages:
                logger.info(f"完成 {self.stock_name} 全部 {self.total_pages} 页数据的采集")
        except Exception as e:
            # Log with traceback; do not re-raise so the crawl continues
            # with the remaining scheduled requests.
            logger.exception(f"Failed to process page: {e}")