# -*- coding: utf-8 -*-

"""
@Author: 张晟烨
@Date: 2022/11/24
@Email: zhangsy@zylliondata.com
@Description: 行业新闻
@Source: https://www.electrive.com/
@Version: Python3.8
@Modified By:
"""

import scrapy
from ..items import INDUSTRY_NEWSItem

from urllib.parse import urljoin
from lxml import etree
from datetime import datetime
import re
import json

# Browser-impersonation headers sent with every request (listing and detail pages).
# NOTE(review): the 'cookie' value embeds a captured consent/ad session that
# expires (borlabs-cookie expiry visible in the value) — requests may start
# failing or behaving differently once it lapses; confirm whether it is needed.
headers = {
    'authority': 'www.electrive.com',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'accept-language': 'en,zh-CN;q=0.9,zh;q=0.8,zh-TW;q=0.7',
    'cache-control': 'max-age=0',
    'cookie': '__gads=ID=f3247d5de02ff947-227e59e51dd800d9:T=1667803685:RT=1667803685:S=ALNI_MblZwuC5GkI1xWP8FTcUQ2ux3qrpg; borlabs-cookie=%7B%22consents%22%3A%7B%22essential%22%3A%5B%22borlabs-cookie%22%5D%2C%22marketing%22%3A%5B%22google-adsense%22%5D%2C%22external-media%22%3A%5B%22facebook%22%2C%22googlemaps%22%2C%22instagram%22%2C%22twitter%22%2C%22vimeo%22%2C%22youtube%22%5D%7D%2C%22domainPath%22%3A%22www.electrive.com%2F%22%2C%22expires%22%3A%22Mon%2C%2008%20May%202023%2006%3A48%3A15%20GMT%22%2C%22uid%22%3A%22kxlgjpyn-v4l0jsuu-6lac493d-b333jh5l%22%2C%22version%22%3A%221%22%7D; __gpi=UID=00000b771120c1b4:T=1667803685:RT=1669291116:S=ALNI_MbEDdIQYfnQ_jPuCevrZ5MSNRdgfA',
    'sec-ch-ua': '"Google Chrome";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'none',
    'sec-fetch-user': '?1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}


class Primarch(scrapy.Spider):
    """Industry-news spider for https://www.electrive.com/ (automobile category).

    Crawls listing pages 1-2, follows each article link and extracts
    title, description and body text into an INDUSTRY_NEWSItem.
    """
    item_name = 'INDUSTRY_NEWSItem'
    name = 'electrivecom_industrynews_1_zsy'
    custom_settings = {
        'DOWNLOAD_DELAY': 1,
        'DOWNLOADER_MIDDLEWARES': {
            # rotating-proxy middleware (project-local); toggle by commenting
            'tutorial.middlewares.RandomProxyMiddleware': 101,
            # 'TutorialSpiderMiddleware':200
        }}

    def start_requests(self):
        """Build the first-level (listing page) requests.

        :return: generator of scrapy.Request for listing pages 1-2
        """
        url = 'https://www.electrive.com/category/automobile/page/{nums}/'
        for page in range(1, 3):
            yield scrapy.Request(url=url.format(nums=page), headers=headers,
                                 method="get", callback=self.parse,
                                 dont_filter=True)

    def parse(self, response, **kwargs):
        """Parse a listing page and schedule one detail request per article.

        BUG FIX: the file previously defined ``parse`` twice (the first
        definition was shadowed) and reused a single item instance for every
        article, so all pending requests carried in ``meta`` shared — and
        overwrote — the same object.  A fresh item is now built per article,
        and the field assignments from the shadowed definition are restored.

        :param response: listing-page HTML response
        :return: generator of detail-page requests (item carried in meta)
        """
        tree = etree.HTML(response.text)
        for article in tree.xpath('//article')[:2]:
            item = INDUSTRY_NEWSItem()
            item['description'] = ''.join(article.xpath('.//p//text()'))  # String: summary
            item['title'] = ''.join(article.xpath('.//h3//text()'))       # String: headline
            # Listing page exposes no publish date; keep the sentinel date.
            item['releaseAt'] = datetime(1971, 1, 1)                      # Date: publish time
            item['source'] = 'https://www.electrive.com/'                 # String: data source
            item['target'] = article.xpath('.//a/@href')[0]               # String: article url
            item['third_class'] = ''                                      # String: last-level entity
            item['category'] = []                                         # List: categories
            item['industry'] = ['新能源汽车产业']                          # List: industry tags
            yield scrapy.Request(url=item['target'], method='get',
                                 callback=self.parse1, meta={'item': item},
                                 headers=headers)

    def parse1(self, response):
        """Parse an article detail page and emit the completed item.

        BUG FIX: ``yield item`` was commented out, so the spider never
        produced any items; it is restored here.

        :param response: detail-page HTML response
        :return: populated INDUSTRY_NEWSItem
        """
        item = response.meta.get('item')
        # Strip <style>/<script> blocks so their text does not leak into //p//text().
        # (These substitutions do not affect subsequent xpath parsing.)
        text = re.sub(r'(?s)(?=<style).*?(?<=/style>)', '', response.text)
        text = re.sub(r'(?s)(?=<script).*?(?<=/script>)', '', text)
        tree = etree.HTML(text)
        item['content'] = ''.join(tree.xpath('//p//text()'))  # String: body text
        # Normalise every collected field: drop whitespace control characters
        # and leftover markup fragments.
        for key in item:
            if isinstance(item[key], str):
                item[key] = self._clean(item[key])
            elif isinstance(item[key], list):
                item[key] = [self._clean(value) for value in item[key]]
        yield item

    @staticmethod
    def _clean(value):
        """Remove whitespace/control characters and stray tag fragments from value."""
        for ch in ('\xa0', '\r', '\n', '\t', ' ', '\u3000'):
            value = value.replace(ch, '')
        # Same pattern as before, as a raw string (avoids invalid-escape warnings).
        return re.sub(r'(?<=<).*?(?<=>)', '', value)
