from collections import OrderedDict  
import scrapy  
from ..items import SsqItem  
from ..logger import logger  
from ..readident import readident  

class SsqSpider(scrapy.Spider):
    """Spider that scrapes Shuangseqiu (SSQ) lottery draw results from
    kaijiang.500.com.

    Class-level attributes track progress across callbacks:
        ident_count  -- number of draw pages processed so far
        ident_amount -- total number of new draws scheduled this run
    """
    name = 'SSQ'
    allowed_domains = ['kaijiang.500.com']
    start_urls = ['http://kaijiang.500.com/ssq.shtml']
    ident_count = 0
    ident_amount = 0

    def start_requests(self):
        """Issue the initial request with custom headers.

        The empty ``Expect`` header stops the server from answering with
        ``417 Expectation Failed``.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
            'Expect': ''  # disable the Expect header to avoid 417 errors
        }
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, headers=headers, dont_filter=True)

    def parse(self, response):
        """Parse the index page.

        Collects the draw idents already stored (via ``readident``), diffs
        them against the idents listed on the page, and schedules one
        request per missing draw.
        """
        # idents already present in storage; readident returns rows whose
        # first column is the ident string
        known_rows = readident('ident')
        known_idents = [row[0] for row in known_rows]

        date_select_box_sel = '//*[(@id = "change_date")]/following-sibling::*//a'
        url_od = OrderedDict()
        for a in response.xpath(date_select_box_sel):
            ident = a.re_first(r'(?<=>)\d+(?=<)')
            url = a.attrib.get('href', '')
            # Skip anchors without a numeric ident or an href: the original
            # code stored a None key / empty URL here, and scrapy.Request
            # later raised ValueError on the empty URL, killing the crawl.
            if ident and url:
                url_od[ident] = url

        process_idents = sorted(set(url_od).difference(known_idents))
        SsqSpider.ident_amount = len(process_idents)

        if not process_idents:
            logger.info("It's already the latest data.")
            return

        for i, ident in enumerate(process_idents, start=1):
            # urljoin tolerates both absolute and relative hrefs; a bare
            # relative href would make scrapy.Request raise "Missing scheme".
            yield scrapy.Request(
                response.urljoin(url_od[ident]),
                callback=self.parse_ssq,
                meta={'id': len(known_idents) + i},
            )

    def parse_ssq(self, response):
        """Parse one draw page and yield a populated SsqItem.

        The page is expected to contain 2 or 3 ``kj_tablelist`` tables:
        the first holds the draw result, the last one (index 1 or 2
        depending on layout) holds the prize details.
        """
        SsqSpider.ident_count += 1
        print(f"\rProcess: {SsqSpider.ident_count}/{SsqSpider.ident_amount}", end="")
        ssq = SsqItem()

        try:
            tables = response.xpath('//*[starts-with(@class, "kj_tablelist")]')
            table_count = len(tables)

            if table_count == 2:
                kaijiang_table, detail_table = tables[0], tables[1]
            elif table_count == 3:
                kaijiang_table, detail_table = tables[0], tables[2]
            else:
                raise NotImplementedError("Table Count Error!")

            # draw-result table
            ssq['id'] = response.request.meta.get('id')
            ssq['date'] = kaijiang_table.xpath('string(.//span[2])').re_first(r'\d{4}.+?\d+.+?\d+.+?')
            ssq['ident'] = kaijiang_table.xpath('string(.//span[1]/a)').re_first(r'\d+')

            balls = kaijiang_table.xpath('.//li').re(r'\d+')
            # 6 red balls + 1 blue ball; fail with a descriptive message
            # instead of the bare IndexError the original raised on balls[6].
            if len(balls) < 7:
                raise ValueError(f"Expected at least 7 balls, got {len(balls)}")
            for i, ball in enumerate(balls[:6], start=1):
                ssq[f'red_{i}'] = ball
            ssq['blue'] = balls[6]

            # detail table: the last number in row 3 / column 2
            times = detail_table.xpath('./tr[3]/td[2]/text()').re(r'\d+')
            ssq['times'] = times[-1].strip() if times else '0'
            logger.info(f'{SsqSpider.ident_count}: {ssq["ident"]}')

            yield ssq  # hand the item to the pipeline directly

        except Exception as e:
            # Broad catch is deliberate: one bad page must not abort the
            # whole crawl; the failure is logged with its URL instead.
            logger.error(f"Error processing {response.url}: {e}")