# -*- coding: utf-8 -*-

"""
@Author: 张晟烨
@Date: 2022/11/07
@Email: zhangsy@zylliondata.com
@Description: 行业新闻
@source: https://www.canalys.com/newsroom
@Version: Python3.8
@Modified By: 
"""

import json
import re
from datetime import datetime
from urllib.parse import urljoin

import scrapy
from lxml import etree

from ..items import INDUSTRY_NEWSItem

# Request headers mimicking a desktop Chrome browser; shared by every request
# this spider sends (both the JSON API and the article detail pages).
# NOTE(review): the AWSALBAPP cookie values are '_remove_' placeholders —
# presumably sanitized session cookies; confirm whether real values are needed.
headers = {
  'authority': 'api.canalys.com',
  'accept': 'application/json, text/plain, */*',
  'accept-language': 'en',
  'origin': 'https://www.canalys.com',
  'referer': 'https://www.canalys.com/',
  'sec-ch-ua': '"Google Chrome";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
  'sec-ch-ua-mobile': '?0',
  'sec-ch-ua-platform': '"Windows"',
  'sec-fetch-dest': 'empty',
  'sec-fetch-mode': 'cors',
  'sec-fetch-site': 'same-site',
  'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
  'Cookie': 'AWSALBAPP-0=_remove_; AWSALBAPP-1=_remove_; AWSALBAPP-2=_remove_; AWSALBAPP-3=_remove_'
}

class Primarch(scrapy.Spider):
    """Spider for Canalys newsroom industry-news articles.

    Crawls the JSON list endpoint ``https://api.canalys.com/api/newsroom/``
    (pages 1-9), follows each article URL, and yields one
    ``INDUSTRY_NEWSItem`` per article.
    """
    item_name = 'INDUSTRY_NEWSItem'
    # Spider name used by the scheduler to launch this crawler.
    name = 'canalyscom_industrynews_1_zsy'
    custom_settings = {
        'DOWNLOAD_DELAY': 1,
        'DOWNLOADER_MIDDLEWARES': {
            'tutorial.middlewares.RandomProxyMiddleware': 101,
        }}

    def start_requests(self):
        """Build the first-level (list page) requests.

        :return: one request per newsroom list page (pages 1-9)
        """
        start_url = "https://api.canalys.com/api/newsroom/?page={nums}"
        for page in range(1, 10):
            yield scrapy.Request(url=start_url.format(nums=page), headers=headers,
                                 method="get", callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Parse a list page and build one detail request per article.

        :param response: list-page response (JSON body)
        :return: second-level requests, each carrying a fresh item in meta
        """
        # Strip <style>/<script> sections before parsing. On a pure JSON body
        # these substitutions are no-ops; kept for safety.
        text = re.sub(r'(?s)(?=<style).*?(?<=yle>)', '', response.text)
        text = re.sub(r'(?s)(?=<script).*?(?<=script>)', '', text)
        for entry in json.loads(text)['items']:
            # BUG FIX: create a fresh item for every article. The original
            # reused a single item instance across all iterations and passed it
            # in meta, so concurrent callbacks all mutated (and yielded) the
            # same object, corrupting the scraped data.
            item = INDUSTRY_NEWSItem()
            item['source'] = 'https://www.canalys.com/newsroom/'  # String: data source
            item['description'] = ''  # String: summary, filled in parse1
            item['content'] = ''  # String: body text, filled in parse1
            item['title'] = entry['title']  # String: article title
            # Placeholder release date; the real publish date is not extracted.
            # TODO(review): parse the article date if the API provides one.
            item['releaseAt'] = datetime(1971, 1, 1)
            # NOTE(review): plain concatenation — if entry['url'] starts with
            # '/', this produces a double slash; confirm the API's url format.
            item['target'] = item['source'] + entry['url']  # String: article URL
            item['third_class'] = ''  # String: last-level entity
            item['category'] = []  # List: categories
            item['industry'] = ['新能源汽车产业']  # List: industries
            yield scrapy.Request(url=item['target'], method='get', callback=self.parse1,
                                 meta={'item': item}, headers=headers)

    @staticmethod
    def _clean(value):
        """Normalize whitespace and escape artifacts in one scraped string.

        Preserves the original cleaning chain exactly, including the
        ``.replace(' ', '')`` step that removes ALL spaces.
        NOTE(review): removing every space destroys word boundaries in
        English text — confirm this is intentional for this source.
        """
        value = value.replace('\xa0', '').replace('\r', " ").replace('\n', ' ') \
            .replace('  ', '').replace('\t', ' ').replace('\\t', '') \
            .replace(' ', '').replace('\r\n ', ' ').replace('\u3000\'', '') \
            .replace(r'\r', '').replace(r'\n', '').replace(r'\t', '')
        # NOTE(review): the leading lookbehind '(?<=\<)' leaves a stray '<'
        # for each stripped tag; kept as-is to preserve the original output.
        return re.sub(r'(?<=\<).*?(?<=\>)', '', value)

    def parse1(self, response):
        """Parse an article detail page and yield the completed item.

        :param response: detail-page response (HTML)
        :return: the populated INDUSTRY_NEWSItem
        """
        item = response.meta.get('item')
        # Strip <style>/<script> blocks; does not affect the XPath below.
        text = re.sub(r'(?s)(?=<style).*?(?<=yle>)', '', response.text)
        text = re.sub(r'(?s)(?=<script).*?(?<=script>)', '', text)
        html = etree.HTML(text)

        item['description'] = ''  # String: summary — not available on the page
        # BUG FIX: the original XPath used the non-existent attribute
        # '@class6="content-data"', which matched nothing, so the article
        # body was always scraped as an empty string.
        item['content'] = ''.join(html.xpath('//*[@class="content-data"]//text()'))  # String: body text

        for key in item:
            if isinstance(item[key], str):
                item[key] = self._clean(item[key])
            elif isinstance(item[key], list):
                item[key] = [self._clean(part) for part in item[key]]

        yield item
