import json
import time
from datetime import datetime
from urllib.parse import urlencode

import scrapy

from web_crawler.config import get_headers, get_cookies


class XueqiuSpider(scrapy.Spider):
    """Crawl the Xueqiu stock-screener API and yield one flat dict per stock.

    The screener endpoint is paginated. For each selected market the spider
    fetches page 1, derives the total page count from the reported record
    count, then schedules the remaining pages. Items are emitted through
    :meth:`process_items` and persisted by ``XueqiuStockPipeline``.
    """

    name = "xueqiu_stock"
    allowed_domains = ["xueqiu.com"]
    # Market codes this spider knows how to crawl.
    MARKETS = ['CN', 'HK']

    start_urls = [
        'https://xueqiu.com/service/screener/screen?page=1'
    ]

    # Per-spider settings (override the project-wide settings.py).
    custom_settings = {
        'CONCURRENT_REQUESTS': 16,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
        'DOWNLOAD_DELAY': 0.5,
        'ITEM_PIPELINES': {
            'web_crawler.pipeline.stocks.XueqiuStockPipeline': 300,
        }
    }

    def __init__(self, markets=None, *args, **kwargs):
        """Accept an optional ``markets`` argument.

        ``markets`` may be a list or a comma-separated string (as passed on
        the command line via ``-a markets=CN,HK``); defaults to every
        supported market.
        """
        super().__init__(*args, **kwargs)
        self.markets = self._validate_markets(markets)
        self.base_url = 'https://xueqiu.com/service/screener/screen'

    def _validate_markets(self, markets):
        """Validate and normalize the market argument.

        Returns a list of supported market codes; raises ``ValueError``
        when none of the requested markets is supported.
        """
        if markets is None:
            return self.MARKETS  # default: crawl every supported market

        if isinstance(markets, str):
            markets = markets.split(',')
        # Normalize case and whitespace so e.g. "cn, hk" is accepted too.
        markets = [str(m).strip().upper() for m in markets]

        # Drop anything outside the supported set.
        valid_markets = [m for m in markets if m in self.MARKETS]
        if not valid_markets:
            raise ValueError(f"无有效市场，支持的市场: {self.MARKETS}")
        return valid_markets

    def start_requests(self):
        """Issue the first-page request for every selected market."""
        for market in self.markets:
            params = {
                'category': market,
                'size': '100',  # records per page
                'order': 'desc',
                'order_by': 'follow7d',
                'only_count': '0',
                'page': '1',
                # Cache-busting millisecond timestamp, mirroring the
                # site's own frontend requests.
                "_": str(int(time.time() * 1000))
            }
            yield scrapy.Request(
                url=f"{self.base_url}?{urlencode(params)}",
                headers=get_headers(),
                cookies=get_cookies(),
                callback=self.parse_first_page,
                meta={
                    'params': params,
                    'market': market,  # pass market type to callbacks
                }
            )

    def parse_first_page(self, response):
        """Handle page 1: emit its items, then schedule the remaining pages."""
        data = json.loads(response.text)
        if not data.get('data'):
            self.logger.error("获取数据失败")
            return

        market = response.meta['market']
        # Emit the first page's records.
        yield from self.process_items(data['data']['list'], 1, market=market)

        # Derive total pages from the reported record count and the page
        # size we actually requested (ceiling division). The original
        # hard-coded 100 here while 'size' lived in the params — keep the
        # two in sync by reading it back.
        total = data['data']['count']
        page_size = int(response.meta['params']['size'])
        total_pages = (total + page_size - 1) // page_size

        # Schedule pages 2..N.
        for page in range(2, total_pages + 1):
            params = response.meta['params'].copy()
            params['page'] = str(page)
            yield scrapy.Request(
                url=f"{self.base_url}?{urlencode(params)}",
                headers=get_headers(),
                cookies=get_cookies(),
                callback=self.parse_page,
                meta={
                    'page': page,
                    'market': market,
                }
            )

    def parse_page(self, response):
        """Handle pages 2..N: emit that page's items."""
        data = json.loads(response.text)
        market = response.meta['market']
        if data.get('data'):
            yield from self.process_items(
                data['data']['list'], response.meta['page'], market)

    def process_items(self, items, page, market):
        """Convert raw API records into flat item dicts for the pipeline."""
        for item in items:
            yield {
                'symbol': item.get('symbol'),
                'name': item.get('name'),
                'current': item.get('current'),
                'percent': item.get('percent'),
                'page': page,
                'market': market,
                'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            }

    def parse(self, response):
        """Single-callback crawl flow (not reached via start_requests, which
        sets explicit callbacks).

        Two fixes versus the original: it called ``self.get_headers()`` /
        ``self.get_cookies()``, which do not exist as methods and would
        raise ``AttributeError`` (the module-level helpers are used
        instead), and it never yielded any items — every page's records
        are now emitted via :meth:`process_items`.
        """
        data = json.loads(response.text)
        params = response.meta['params']
        current_page = int(params['page'])

        if 'data' in data:
            # The market code travels in the query params as 'category'.
            yield from self.process_items(
                data['data']['list'], current_page, params['category'])

        # On page 1, compute the total page count and fan out.
        if current_page == 1 and 'data' in data:
            total_count = data['data']['count']
            page_size = int(params['size'])
            total_pages = (total_count + page_size - 1) // page_size
            self.logger.info(f"总记录数: {total_count}, 总页数: {total_pages}")
            for page_num in range(2, total_pages + 1):
                next_params = params.copy()
                next_params['page'] = str(page_num)
                yield scrapy.Request(
                    url=f"{self.base_url}?{urlencode(next_params)}",
                    headers=get_headers(),
                    cookies=get_cookies(),
                    callback=self.parse,
                    meta={'params': next_params},
                    dont_filter=True
                )


