# encoding='utf-8'

"""
@Author: 张晟烨
@Date: 2022/11/08
@Email: zhangsy@zylliondata.com
@Description: 政策原文
@source: http://www.gdtbt.org.cn/laweurlex.aspx?page=1
@Version: Python3.8
@Modified By:
"""

import re
from datetime import datetime

import scrapy
from lxml import etree
import json
# from .tool_date_parser import datetime_parser1
from ..items import POLICYItem

# Static HTTP headers attached to every request (listing and detail pages).
# NOTE(review): the Cookie carries a hard-coded ASP.NET session id and
# analytics cookies that will expire — confirm whether the target site
# still responds without them before relying on this value.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,zh-TW;q=0.7',
    'Connection': 'keep-alive',
    'Cookie': 'ASP.NET_SessionId=xc0bqrfbeozakxhpdnvs1p05; ASP.NET_SessionId_NS_Sig=oenCV6md0W0g_VW_; _gscu_1628781594=678883947prnyc17; _gscbrs_1628781594=1; Hm_lvt_a62e2eab0e79f60347aab0ee4cab50e5=1667888423; Hm_lpvt_a62e2eab0e79f60347aab0ee4cab50e5=1667888697; _gscs_1628781594=67888394zww7r117|pv:30',
    'Referer': 'http://www.gdtbt.org.cn/laweurlex.aspx?page=1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}


class Primarch(scrapy.Spider):
    """Spider for EU policy originals listed on gdtbt.org.cn.

    Listing data comes from a JSON POST API; each row then drives a GET
    request to the detail page, whose ``<p>`` text becomes the content.
    """
    item_name = 'POLICYItem'
    name = 'gdtbtorgcn_policy_1_zsy'
    custom_settings = {
        'DOWNLOAD_DELAY': 0.5,
        'DOWNLOADER_MIDDLEWARES': {
            # Proxy-IP middleware: enable the first line, disable the second.
            # 'tutorial.middlewares.RandomProxyMiddleware': 101,
            # 'tutorial.middlewares.RandomProxyMiddleware': None,
        }}

    @staticmethod
    def _strip_markup(html):
        """Remove <style>/<script> elements (tag AND body) from raw HTML.

        The original ``parse`` used lookbehinds ``(?<=yle>)``/``(?<=script>)``
        that stopped at the end of the OPENING tag, leaving the style/script
        bodies in the text; this uses the closing-tag lookbehinds that
        ``parse1`` already used. Does not affect xpath extraction of the
        remaining content.
        """
        html = re.sub('(?s)(?=<style).*?(?<=/style>)', '', html)
        return re.sub('(?s)(?=<script).*?(?<=/script>)', '', html)

    @staticmethod
    def _clean(value):
        """Strip whitespace artifacts and leftover markup from one string."""
        value = (value.replace('\xa0', '').replace('\r', '').replace('\n', '')
                 .replace('\t', '').replace(' ', '').replace('\u3000', ''))
        # Fix: the original pattern '(?<=\<).*?(?<=\>)' removed 'abc>' from
        # '<abc>' but left the leading '<' behind; strip the whole tag.
        return re.sub(r'<.*?>', '', value)

    def start_requests(self):
        """Build the first-level (listing) requests, one per result page.

        :return: iterator of POST requests against the policy-list API

        Fixes vs. the original:
        * the body was passed through ``json.dumps`` on an ALREADY serialized
          JSON string, double-encoding it into a JSON string literal;
        * ``pageIndex`` was pinned at 2 while ``pageSize`` was templated with
          the loop counter, so pagination never advanced. NOTE(review):
          ``pageSize`` is now fixed at 10 — confirm the intended page size
          against the API before deploying.
        """
        url = "https://bootapi.51bmj.cn/bmj-api/api/es/Policy/queryPolicyList.json?domain=undefined"
        for page in range(1, 8):
            payload = {
                "beginDate": "",
                "dq": "0200",
                "endDate": "",
                "pageIndex": page,
                "pageSize": 10,
                "policyId": "",
                "source": "",
                "title": "新能源汽车",
                "userID": "",
                "whetherCanExport": 0,
            }
            yield scrapy.Request(url=url, headers=headers, method="POST",
                                 callback=self.parse, dont_filter=True,
                                 body=json.dumps(payload))

    def parse(self, response):
        """Parse a listing page and emit one detail request per policy row.

        :param response: listing-page response
        :return: second-level requests, each carrying a partially filled item
        """
        text = self._strip_markup(response.text)
        res = etree.HTML(text)
        # NOTE(review): '@class6' looks like it should be '@class' — confirm
        # against the live page before changing the selectors.
        target_list = res.xpath('//*[@class6="tablelist tablelaweurlex"]')
        for div in target_list[1:]:
            # Fix: a single POLICYItem was previously created OUTSIDE the
            # loop and shared (via meta) by every concurrent request, so
            # late responses clobbered earlier rows. One fresh item per row.
            item = POLICYItem()
            item['policy_name'] = ''.join(div.xpath('.//*[@class6="td3"]//text()'))  # str: policy name
            item['text_number'] = ''.join(div.xpath('.//*[@class6="td0"]//text()'))  # str: document number
            # Date: fixed placeholder — the date parser is commented out.
            item['releaseAt'] = datetime(1971, 1, 1)
            item['source'] = 'http://www.gdtbt.org.cn/laweurlex.aspx?page=1'  # str: data source
            item['file_target'] = []  # list: file/attachment urls
            item['issued'] = '欧盟'  # str: issuing body
            item['content'] = ''  # str: body text, filled in parse1
            hrefs = div.xpath('.//*[@class6="td4"]//@href')
            if not hrefs:
                # Guard: a row without a detail link previously raised
                # IndexError and killed the whole callback.
                continue
            item['target'] = hrefs[0]  # str: detail-page url
            item['third_class'] = '政策原文'  # str: last-level entity
            item['category'] = []  # list: categories
            item['industry'] = []  # list: industries
            yield scrapy.Request(url=item['target'], method='GET',
                                 callback=self.parse1, meta={'item': item},
                                 headers=headers)

    def parse1(self, response):
        """Parse a detail page, fill in the content, clean fields, yield item.

        :param response: detail-page response carrying the item in meta
        :return: the finished POLICYItem
        """
        item = response.meta.get('item')
        text = self._strip_markup(response.text)
        tree = etree.HTML(text)  # no longer shadows the scrapy response
        item['content'] = ''.join(tree.xpath('//p//text()'))

        # Normalize every field: isinstance instead of type(...) == ...,
        # and a single shared cleanup helper instead of duplicated chains.
        for key in item:
            if isinstance(item[key], str):
                item[key] = self._clean(item[key])
            elif isinstance(item[key], list):
                item[key] = [self._clean(v) for v in item[key]]

        yield item
