#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author  : config_spider
import scrapy
import json
import math
import time
import urllib.parse

from apps.tax_policy.tax_policy.items import NetTaxPolicyItem as Item
from apps.tax_policy.tax_policy.spiders.base_spider.base_tax_policy_spider import BaseTaxPolicySpider


class JilinshengPolicy(BaseTaxPolicySpider):
    """Spider for administrative normative documents (行政规范性文件) on the
    Jilin Province information-disclosure portal (xxgk.jl.gov.cn).

    Flow:
      1. ``start_requests`` visits one landing page per department channel.
      2. ``parse_content`` scrapes the JSON list endpoint URL out of the
         landing page's inline script and requests page 1 of the list.
      3. ``parse_list`` emits a detail request per article and fans out
         requests for the remaining pages (only from page 1).
      4. ``parse_detail`` fills the item from the article page and yields it.
    """

    name = 'jilinsheng_policy'

    # Region fields copied verbatim onto every emitted item.
    province = '吉林省'
    city = ''
    county = ''
    park = ''
    # source = '吉林省人民政府'
    url = 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=139839&tit=%E7%9C%81%E8%B4%A2%E6%94%BF%E5%8E%85%E5%8E%85'

    # (human-readable source name, channel landing-page URL) per department.
    # The ``channelid`` and ``tit`` query parameters are parsed back out of
    # the URL in ``parse_content``.
    DEPARTMENT_CHANNELS = [
        ('吉林省发展和改革委员会', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=137922&tit=%E7%9C%81%E5%8F%91%E5%B1%95%E5%92%8C%E6%94%B9%E9%9D%A9%E5%A7%94%E5%91%98%E4%BC%9A'),
        ('吉林省财政厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=139839&tit=%E7%9C%81%E8%B4%A2%E6%94%BF%E5%8E%85'),
        ('吉林省民政厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=139796&tit=%E7%9C%81%E6%B0%91%E6%94%BF%E5%8E%85'),
        ('吉林省人力资源和社会保障厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=139846&tit=%E7%9C%81%E4%BA%BA%E5%8A%9B%E8%B5%84%E6%BA%90%E5%92%8C%E7%A4%BE%E4%BC%9A%E4%BF%9D%E9%9A%9C%E5%8E%85'),
        ('吉林省自然资源厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=139844&tit=%E7%9C%81%E8%87%AA%E7%84%B6%E8%B5%84%E6%BA%90%E5%8E%85'),
        ('吉林省交通运输厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140350&tit=%E7%9C%81%E4%BA%A4%E9%80%9A%E8%BF%90%E8%BE%93%E5%8E%85'),
        ('吉林省农业农村厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140390&tit=%E7%9C%81%E5%86%9C%E4%B8%9A%E5%86%9C%E6%9D%91%E5%8E%85'),
        ('吉林省商务厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140391&tit=%E7%9C%81%E5%95%86%E5%8A%A1%E5%8E%85'),
        ('吉林省文化和旅游厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140411&tit=%E7%9C%81%E6%96%87%E5%8C%96%E5%92%8C%E6%97%85%E6%B8%B8%E5%8E%85'),
        ('吉林省退役军人事务厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=136485&tit=%E7%9C%81%E9%80%80%E5%BD%B9%E5%86%9B%E4%BA%BA%E4%BA%8B%E5%8A%A1%E5%8E%85'),
        ('吉林省市场监督管理厅', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140463&tit=%E7%9C%81%E5%B8%82%E5%9C%BA%E7%9B%91%E7%9D%A3%E7%AE%A1%E7%90%86%E5%8E%85'),
        ('吉林省统计局', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140520&tit=%E7%9C%81%E7%BB%9F%E8%AE%A1%E5%B1%80'),
        ('吉林省地方金融管理局', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140538&tit=%E7%9C%81%E5%9C%B0%E6%96%B9%E9%87%91%E8%9E%8D%E7%9B%91%E7%9D%A3%E7%AE%A1%E7%90%86%E5%B1%80'),
        ('吉林省林业和草原局', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140888&tit=%E7%9C%81%E6%9E%97%E4%B8%9A%E5%92%8C%E8%8D%89%E5%8E%9F%E5%B1%80'),
        ('吉林省能源局', 'https://xxgk.jl.gov.cn/szf/xxgk/gknr/lzyj/xzgfxwj/?channelid=140935&tit=%E7%9C%81%E8%83%BD%E6%BA%90%E5%B1%80'),
    ]

    # Articles per list page requested from the JSON endpoint; also used
    # to compute the total page count in parse_list.
    PAGE_SIZE = 15

    def start_requests(self):
        """Yield one landing-page request per department channel."""
        for source, url in self.DEPARTMENT_CHANNELS:
            yield scrapy.Request(url, callback=self.parse_content, meta={'source': source})

    def parse_content(self, response, **kwargs):
        """Extract the JSON list-endpoint URL from the landing page and
        request page 1 of the article list for this channel."""
        meta = response.meta
        # The endpoint is embedded in an inline script as `url: '...'`.
        url = response.xpath(""".""").re_first(r"url:\s*\'(.*)\',")
        if url is None:
            # Page layout changed or the script block is missing; without the
            # endpoint we cannot continue for this channel.  Bail out instead
            # of letting urljoin(None) raise TypeError.
            self.logger.warning('No list endpoint found on %s', response.url)
            return
        url = response.urljoin(url)
        channelid = response.url.split('channelid=')[1].split('&')[0]
        stit = response.url.split('tit=')[1].split('&')[0]
        stit = urllib.parse.unquote(stit)
        params = {
            "page": "1",
            "size": str(self.PAGE_SIZE),
            "keyword": "",
            "keywordCategory": "title",
            "channelid": channelid,
            # Cache-busting timestamp in milliseconds.
            "_": f"{int(time.time() * 1000)}"
        }
        temp_url = url + f'{"&" if "?" in url else "?"}{urllib.parse.urlencode(params)}'
        meta.update({"params": params, "url": url})
        yield scrapy.Request(temp_url, callback=self.parse_list, meta=meta)

    def parse_list(self, response, **kwargs):
        """Parse one JSON list page: yield a detail request per article and,
        on the first page only, fan out requests for the remaining pages."""
        meta = response.meta
        params = meta.get('params')
        prev_item = meta.get('item')
        try:
            json_data = json.loads(response.text.strip())
        except ValueError:
            # Non-JSON payload (error page, rate limit, ...).  Log and stop
            # this branch; previously a bare `except` fell through and the
            # loop below raised NameError on the undefined `json_data`.
            self.logger.warning('Non-JSON list response %s (params=%s)', response.url, params)
            return
        for elem in json_data['list']:
            item = Item()
            item['source_url'] = elem['puburl']
            item['source_url'] = response.urljoin(item['source_url'])
            item['publish_date'] = elem['pubdate']
            item['source'] = meta.get('source')
            # Fields carried over from an earlier stage take precedence.
            if prev_item is not None:
                for key, value in prev_item.items():
                    item[key] = value
            if not item['source_url']:
                continue
            # Only follow static article pages (.htm/.html/.shtml links).
            if '.htm' not in item['source_url'] and '.shtm' not in item['source_url']:
                continue
            yield response.follow(item['source_url'], callback=self.parse_detail, meta={'item': item})
        # Pagination is triggered only from page 1 (is_next unset); pages
        # scheduled below carry is_next=False so they don't fan out again.
        if meta.get("is_next") is not False:
            total = json_data['totalCount']
            url = meta.get('url')
            self.logger.info('source %s total %s', meta.get('source'), total)
            total_page = math.ceil(int(total) / self.PAGE_SIZE)
            for page_num in range(2, total_page + 1):
                # Copy params/meta per request: the originals were mutated in
                # place, so every queued request aliased the same dicts.
                page_params = dict(params)
                page_params['page'] = page_num
                page_params['_'] = f"{int(time.time() * 1000)}"
                temp_url = url + f'{"&" if "?" in url else "?"}{urllib.parse.urlencode(page_params)}'
                page_meta = dict(meta)
                page_meta.update({'item': prev_item, 'is_next': False, "params": page_params, "url": url})
                # Plain GET — use scrapy.Request for consistency with the
                # other callbacks (FormRequest added nothing here).
                yield scrapy.Request(temp_url, callback=self.parse_list, meta=page_meta)

    def parse_detail(self, response, **kwargs):
        """Fill the item from the article detail page and yield it."""
        item = Item() if response.meta.get('item') is None else response.meta.get('item')
        item['title'] = response.xpath("""string(//meta[@name='ArticleTitle']/@content)""").get()
        # item['publish_date'] = response.xpath("""string(//meta[@name='PubDate']/@content)""").get()
        # NOTE(review): "." serializes the whole document — content is the
        # full page HTML, presumably cleaned downstream; confirm in pipeline.
        item['content'] = response.xpath(""".""").get()
        item['province'] = self.province
        item['city'] = self.city
        item['county'] = self.county
        item['park'] = self.park
        # item['source'] = self.source
        yield item


if __name__ == "__main__":
    # Convenience entry point: launch this spider directly via scrapy.
    from scrapy import cmdline

    cmdline.execute(["scrapy", "crawl", "jilinsheng_policy"])
