#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2025/5/14 15:37
# @Author : fanzf
# @Version : V 0.1
# @File : gwy_spider.py
# @desc :
import scrapy

from cq_gwy.items import CqGwyItem


class GwySpider(scrapy.Spider):
    """Scrape the 2025 Chongqing civil-service exam table from rlsbj.cq.gov.cn.

    Parses the first ``<table>`` on the announcement page, reads column
    names from the ``tr.firstRow`` header row, and yields one
    ``CqGwyItem`` per data row, with each cell keyed by its (stripped)
    header text.
    """

    name = 'gwy_crawler'
    start_urls = [
        'https://rlsbj.cq.gov.cn/ztzl/zqs2020ndkslygwyzl/gwyks2025/202502/t20250228_14358696.html'
    ]

    # Optional per-spider overrides; enable if the site throttles requests
    # or direct CSV export is wanted.
    # custom_settings = {
    #     'USER_AGENT': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    #     'DOWNLOAD_DELAY': 2,
    #     'FEED_FORMAT': 'csv',
    #     'FEED_URI': 'output.csv'
    # }

    def parse(self, response):
        """Yield one header-keyed item per data row of the first table.

        :param response: page response for a URL in ``start_urls``
        :return: generator of ``CqGwyItem``
        """
        tables = response.css('table')
        # Guard: pages without a table previously raised IndexError on
        # tables[0]; log and bail out instead so the crawl keeps going.
        if not tables:
            self.logger.warning("no <table> found on %s", response.url)
            return

        # The first table holds the data (adjust index if the page layout
        # ever changes).
        target_table = tables[0]

        # Header texts from the marked header row. The old selector
        # prefixed a redundant 'table ' even though target_table already
        # is the <table> element; it only worked because CSS-to-XPath
        # translation uses descendant-or-self.
        headers = target_table.css('tr.firstRow td::text').getall()
        if not headers:
            # Without headers every item would be empty — surface it.
            self.logger.warning("header row tr.firstRow not found on %s",
                                response.url)
            return
        self.logger.info(f"表头: {headers}")

        # Data rows: everything after the header row.
        for row in target_table.css('tr')[1:]:
            item = CqGwyItem()
            cells = row.css('td::text').getall()

            # Bind each cell to its header; ragged rows are padded with ''
            # so a short row never raises IndexError.
            # NOTE(review): assumes CqGwyItem declares fields matching the
            # header texts — verify against cq_gwy/items.py.
            for i, header in enumerate(headers):
                item[header.strip()] = cells[i].strip() if i < len(cells) else ''

            yield item
