import scrapy
import re

from shanghaiSpider.items import ShanghaispiderItem

class ShggzySpider(scrapy.Spider):
    """Spider for www.shggzy.com (Shanghai public-resource trading portal).

    Crawls the section listing pages configured in ``page_dict`` (listing
    URL -> total page count), follows each row to its detail page, and
    yields a ``ShanghaispiderItem`` per detail page with the title, body
    text and (when present) the release timestamp.
    """

    name = "shggzy"
    allowed_domains = ["www.shggzy.com"]

    # Section listing URL -> number of listing pages for that section.
    # The commented entries below are other site sections kept on hand so
    # they can be re-enabled without looking the counts up again.
    page_dict = {"https://www.shggzy.com/zcfgzhfg": 7, "https://www.shggzy.com/zcfg": 23, "https://www.shggzy.com/xxgkgz": 8
                 # ,"https://www.shggzy.com/jyxxgc":1000, "https://www.shggzy.com/jyxxtd":311, "https://www.shggzy.com/jyxxzc":1000,
                 # "https://www.shggzy.com/jyxxcq":1000,"https://www.shggzy.com/jyxxjd":15,"https://www.shggzy.com/jyxxtl":483,
                 # "https://www.shggzy.com/jyxxjs":75,"https://www.shggzy.com/jyxxtpf":2,"https://www.shggzy.com/jyxxncys":1000,
                 # "https://www.shggzy.com/jyxxnc":813,"https://www.shggzy.com/jyxxpm":1000,"https://www.shggzy.com/jyxxyp":47,
                 # "https://www.shggzy.com/jyxxwzcg":1000,"https://www.shggzy.com/jyxxwtl":31,"https://www.shggzy.com/jyxxzl":132,
                 # "https://www.shggzy.com/tszc":1,"https://www.shggzy.com/zscq":454,"https://www.shggzy.com/jyxxlsjy":341,
                 # "https://www.shggzy.com/ysqjy":1,"https://www.shggzy.com/jyxxny":3,"https://www.shggzy.com/jyxxzf":1
                 }
    # Seed the crawl with one request per configured section.
    start_urls = list(page_dict)

    cnt = 0  # NOTE(review): not referenced in this file; kept for compatibility.

    def parse_subpage(self, response):
        """Parse a detail page into a ``ShanghaispiderItem``.

        Extracts the document title, the body paragraphs and the release
        timestamp embedded in the page header, then yields the item.
        """
        item = ShanghaispiderItem()
        item['rule_title'] = response.xpath(
            '//*[@id="detail_content"]/div/h2/text()').extract_first()

        # Body paragraphs live either in <p><span> or directly in <p>,
        # depending on the page template; fall back to the plainer layout.
        paragraphs = response.xpath('//div[@class="content"]/p/span/text()').extract()
        if not paragraphs:
            paragraphs = response.xpath('//div[@class="content"]/p/text()').extract()
        # Strip the full-width (U+3000) spaces used as paragraph indents and
        # join with newlines. "".join avoids the quadratic += loop.
        item['content'] = "".join(p.strip('\u3000') + "\n" for p in paragraphs)

        release_time = response.xpath(
            '//div[@class="content-box"]/p[@class="title_p"]/text()').extract_first()
        item['release_time'] = ''
        # BUG FIX: the original tested `if not release_time:`, which fed
        # None/'' into re.search() (TypeError) and never parsed real values.
        if release_time:
            # Header text contains: "发布时间：YYYY-MM-DD HH:MM"
            match = re.search(r'发布时间：(\d{4}-\d{2}-\d{2} \d{2}:\d{2})', release_time)
            if match:
                # group(1) is the captured timestamp.
                item['release_time'] = match.group(1)
        yield item

    def parse(self, response):
        """Parse a listing page: follow detail links, schedule pagination."""
        self.logger.info("listing page: %s", response.url)

        # Each list row opens its detail page through an inline
        # window.open('/...') onclick handler; extract the path from it.
        for onclick in response.xpath('//div[@class="gui-title-bottom"]/ul/li/@onclick'):
            match = re.search(r"window\.open\('(/[^']+)'\)", onclick.get())
            if match:
                yield scrapy.Request(url="https://www.shggzy.com" + match.group(1),
                                     callback=self.parse_subpage)

        last_page_num = ShggzySpider.page_dict.get(response.url)
        if last_page_num:
            self.logger.info("total pages for %s: %d", response.url, last_page_num)
            # Paginated pages follow the "<section>_<n>.jhtml" pattern.
            # These pages call back into parse(); Scrapy's duplicate-request
            # filter stops them from re-scheduling each other.
            for current_page in range(1, last_page_num):
                yield scrapy.Request(url=response.url + "_" + str(current_page) + ".jhtml",
                                     callback=self.parse)