import time
import urllib
import urllib.parse

import scrapy

from ..items import ChezhiwangItem


class CzwSpider(scrapy.Spider):
    """Spider that looks up complaint ("bug") status entries on 12365auto.com.

    For each complaint ID in ``complaint_ids`` it issues a ``?wd=<id>`` query
    against the listing page and extracts the handling-status text from the
    result, yielding one :class:`ChezhiwangItem` per complaint.
    """

    name = "czw"
    allowed_domains = ["www.12365auto.com"]
    start_urls = ["https://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-0-0-0-0-0-1.shtml"]

    # Throttle politely via Scrapy's scheduler instead of blocking the
    # reactor with time.sleep() inside a callback (the original code slept
    # 2s per response, which stalls the whole event loop).
    custom_settings = {"DOWNLOAD_DELAY": 2}

    # Complaint IDs to query. A tuple (not a set) keeps crawl order
    # deterministic; uncomment entries to crawl more complaints.
    complaint_ids = (
        "1162459",
        # "1163039",
        # "1162592",
        # "1163037",
        # "1163109",
        # "1163031",
        # "1163082",
        # "1163226",
    )

    def parse(self, response):
        """Schedule one lookup request per complaint ID.

        :param response: response for the start URL; its URL is reused as the
            base for each query.
        :return: yields :class:`scrapy.Request` objects handled by
            :meth:`parse_bug`.
        """
        for complaint_id in self.complaint_ids:
            # Build the full query URL, e.g. <start_url>?wd=1162459
            full_url = response.url + '?' + urllib.parse.urlencode({"wd": complaint_id})
            yield scrapy.Request(full_url, callback=self.parse_bug)

    def parse_bug(self, response):
        """Extract the complaint-handling status and yield it as an item.

        :param response: response for one complaint query page.
        :return: yields a :class:`ChezhiwangItem` with the status text
            (``None`` when the page has no matching node).
        """
        # extract_first() returns a single string (or None), so no joining
        # of fragments is needed here.
        status = response.xpath('//td[@class="tsgztj"]/span/text()').extract_first()
        self.logger.info("complaint status: %s", status)
        yield ChezhiwangItem(bug=status)





