
import json
import re
from datetime import datetime
from urllib.parse import urljoin

import scrapy
from lxml import etree

from ..items import INDUSTRY_NEWSItem

# Browser-mimicking request headers sent with every request.
# NOTE(review): the Cookie value is a captured session cookie and will
# expire; the spider may need a fresh one (or none) to keep working.
headers = {
    # Bug fix: the Accept value had been garbled to "...image/apng,/;q=0.8..."
    # (the "*/*" wildcard was stripped); restored the standard Chrome string.
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': 'UM_distinctid=18503fd768f6c3-0ff9406fb5bde4-7a575470-144000-18503fd7690da1; CNZZDATA1279033566=2038466363-1670807635-null%7C1670807635; cc8_last_search=1670810083; cc8_last_search=1670811116',
    'Referer': 'http://www.casmita.com/news/search.php?kw=COMS',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.46'
}

class Primarch(scrapy.Spider):
    """Spider for CMOS-related industry news on casmita.com.

    Flow: ``start_requests`` fetches the search-results page, ``parse``
    extracts up to five article links and schedules detail requests, and
    ``parse1`` fills an ``INDUSTRY_NEWSItem`` from each detail page.
    """
    item_name = 'icCMOS'
    name = 'IC_CMOS'
    custom_settings = {
        'DOWNLOAD_DELAY': 1,
        'DOWNLOADER_MIDDLEWARES': {},
    }

    def start_requests(self):
        """Build the first-level request (search-results page).

        :return: generator yielding the initial ``scrapy.Request``
        """
        start_url = "http://www.casmita.com/news/search.php?kw=cmos"
        yield scrapy.Request(url=start_url, headers=headers, method="GET",
                             callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Parse the search-results page and schedule detail-page requests.

        :param response: HTML response of the results page
        :return: generator yielding second-level ``scrapy.Request`` objects
        """
        tree = etree.HTML(response.text)
        hrefs = tree.xpath("/html/body/div[4]/div[2]/div/div/div/div/div[1]/a/@href")
        # Iterate over what the page actually contains (capped at 5, as
        # before) instead of a hard-coded range(0, 5), which raised
        # IndexError whenever fewer than 5 results were returned.
        for href in hrefs[:5]:
            # Bug fix: a single item used to be created before the loop and
            # mutated while all five requests shared it via meta; every
            # request now carries its own item.
            item = INDUSTRY_NEWSItem()
            item['title'] = ''
            item['releaseAt'] = ''
            item['description'] = ''  # String: abstract / summary
            item['content'] = ''      # String: article body
            # Resolve possibly-relative links against the page URL.
            item['target'] = urljoin(response.url, href)  # String: article URL
            item['third_class'] = 'CMOS'    # String: last-level entity
            item['category'] = ['行业新闻']  # List: category
            item['industry'] = ['集成电路']  # List: industry
            yield scrapy.Request(url=item['target'], method='GET',
                                 callback=self.parse1, meta={'item': item},
                                 headers=headers)

    def parse1(self, response):
        """Parse an article detail page and emit the populated item.

        :param response: HTML response of the detail page
        :return: generator yielding the filled ``INDUSTRY_NEWSItem``
        """
        item = response.meta.get('item')
        tree = etree.HTML(response.text)

        def first_or_empty(xp):
            # First xpath hit, or '' when the node is absent, so a layout
            # change no longer crashes the callback with IndexError.
            nodes = tree.xpath(xp)
            return nodes[0] if nodes else ''

        item['title'] = first_or_empty("/html/body/div[4]/div[2]/div[2]/h1/text()")
        item['releaseAt'] = first_or_empty("/html/body/div[4]/div[2]/div[2]/div[1]/span[1]/text()")
        # NOTE(review): this stores a list of text nodes although the field
        # is documented as a String elsewhere; kept as-is because downstream
        # pipeline expectations are not visible here — confirm.
        item['description'] = tree.xpath("/html/body/div[4]/div[2]/div[2]/div[2]/text()")
        # Bug fix: the predicate was @class6="content" (typo), which never
        # matched any element, leaving content permanently empty.
        item['content'] = ''.join(tree.xpath('//*[@class="content"]//text()'))
        # Bug fix: the item was only print()ed and the yield was commented
        # out, so the item pipeline never received anything.
        yield item