# -*- coding: utf-8 -*-
"""
@File    : xizang_zzqscjg_policy
@Author  : caojy
@Time    : 24/12/03 10:27
"""
import datetime

import scrapy
import re
from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item, urllib
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class XizangzzqscjgPolicy(BaseTaxPolicySpider):
    """Spider for regulatory/normative documents published by the Xizang
    (Tibet) Administration for Market Regulation (西藏自治区市场监督管理局).

    Flow: request page 1 of a JSON pager API, follow every list entry to its
    detail page, and (from the first response only) fan out one request per
    remaining page of the list.
    """

    name = 'xizang_zzqscjg_policy'

    # Static provenance fields copied verbatim onto every yielded item.
    source = '西藏自治区市场监督管理局'
    province = '西藏自治区'
    city = ''
    county = ''
    park = ''
    url = 'http://nynct.xizang.gov.cn/zwgk/bmwj/'

    def start_requests(self):
        """Kick off the crawl with page 1 of the JSON list endpoint.

        The pager params and base URL ride along in ``meta`` so that
        :meth:`parse_list` can build the follow-up page requests.
        """
        params = {
            "page": "1",
            "pagesize": "15",
            "siteCode": "xzscj",
            "channelCode": "gzhgfxwj"
        }
        url = 'http://amr.xizang.gov.cn/plugin/pager/pager.jsp?'
        yield scrapy.Request(
            url + urllib.parse.urlencode(params),
            meta={"params": params, "url": url},
            callback=self.parse_list,
        )

    def parse_list(self, response, **kwargs):
        """Parse one JSON list page: follow each entry's detail URL, and on
        the initial response schedule requests for all remaining pages.
        """
        prev_item = response.meta.get('item')
        for elem in response.json()['list']:
            item = Item()
            # 's_url' may be relative; resolve it against the response URL.
            item['source_url'] = response.urljoin(elem['s_url'])
            # 'd_fronttime' is a millisecond epoch timestamp; rendered as a
            # naive local-time string — NOTE(review): timezone depends on the
            # host running the crawl, confirm downstream expectations.
            item['publish_date'] = str(
                datetime.datetime.fromtimestamp(int(elem['d_fronttime']) / 1000))
            if prev_item is not None:
                # Carry over fields accumulated by an earlier callback
                # (overwrites any field set above if present in prev_item).
                for key, value in prev_item.items():
                    item[key] = value
            self.logger.debug('list %s -> detail %s', response.url, item['source_url'])
            yield response.follow(item['source_url'], callback=self.parse_detail,
                                  meta={'item': item})
        # Only the very first response (no 'is_next' flag in meta) performs
        # the pagination fan-out; the pages it schedules set is_next=False so
        # the fan-out does not recurse. Page 1 is re-requested here but the
        # scheduler's duplicate filter drops it.
        if response.meta.get("is_next") is not False:
            url = response.meta.get('url')
            params = response.meta.get('params')
            total_page = response.json()['pagecount']
            self.logger.info('%s page count: %s', response.url, total_page)
            for page_num in range(1, int(total_page) + 1):
                params['page'] = page_num
                yield response.follow(url + urllib.parse.urlencode(params),
                                      callback=self.parse_list,
                                      meta={'item': prev_item, 'is_next': False})

    def parse_detail(self, response, **kwargs):
        """Extract title and raw content from an article detail page and
        emit the completed item.
        """
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        self.logger.debug('publish_date: %s', item['publish_date'])
        # XPath '.' serializes the whole document — the item stores the full
        # page HTML rather than just the article body.
        item['content'] = response.xpath(""".""").get()
        item['source'] = self.source
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        yield item


if __name__ == "__main__":
    from scrapy import cmdline

    # Run this spider directly when the module is executed as a script.
    cmdline.execute(["scrapy", "crawl", "xizang_zzqscjg_policy"])
