import scrapy
import re


class ResourceTradingSpider(scrapy.Spider):
    """Scrape industry-news (hydt) articles from www.sdcqjy.com.

    ``parse`` walks the listing page, records one item per article, and
    follows each article's detail URL; ``parse_detail`` stores the article
    body HTML back onto the matching item.  Collected items are printed
    when the spider closes.
    """

    name = "resource_trading"
    allowed_domains = ["www.sdcqjy.com"]
    start_urls = ["http://www.sdcqjy.com/ty/zxdt/hydt"]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
    }

    # Compiled once, not per link.  Extracts the article id from the <a>
    # tag's onclick JSON, e.g. ...{"id":"123","code":"..."}...
    # Non-greedy (.*?) so a later "code" key elsewhere in the string
    # cannot over-capture (the original greedy (.*) could).
    _ID_RE = re.compile(r'"id":"(.*?)","code"')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Instance state instead of class attributes: class-level mutable
        # lists are shared across every spider instance.
        self.count = 0      # next item id, 0-based
        self.items = []     # items indexed by their 'id'

    def parse(self, response):
        """Extract all article links on the listing page and follow each."""
        for link in response.css('#artDataCont>ul>li>a'):
            onclick = link.css('::attr(onclick)').get()
            match = self._ID_RE.search(onclick) if onclick else None
            if match is None:
                # Unexpected markup: skip this link instead of raising
                # AttributeError and aborting the whole listing parse.
                self.logger.warning("could not extract article id from %r", onclick)
                continue
            item = {
                'id': self.count,
                'name': link.css('p::text').get(),
                'createdTime': link.css('em::text').get(),
            }
            self.items.append(item)
            # Pass the item id through cb_kwargs so parse_detail can find
            # the matching item (cleaner than mutating the Request after
            # construction).
            yield response.follow(
                "http://www.sdcqjy.com/ty/article/hydt/" + match.group(1),
                self.parse_detail,
                cb_kwargs={'item_id': item['id']},
            )
            self.count += 1

    def parse_detail(self, response, item_id):
        """Attach the article body HTML to the item created in ``parse``.

        Bug fix: ids are 0-based and items are appended in id order, so the
        matching item is ``self.items[item_id]``.  The original indexed
        ``self.items[id - 1]``, attaching every detail to the wrong item
        (and id 0's detail to the *last* item via a negative index).
        """
        self.items[item_id]['detail'] = response.css('.art_cont').get()

    def closed(self, reason):
        """Dump collected items when the crawl finishes.

        Replaces ``__del__``: Scrapy calls ``closed()`` deterministically at
        spider shutdown, whereas ``__del__`` may run during interpreter
        teardown (when ``print`` can fail) or not at all.
        """
        print(self.items)
