import scrapy
import copy
from ..items import INDUSTRY_NEWSItem
from urllib import parse
from lxml import etree
from datetime import datetime
import re
import json

# Browser-like HTTP headers reused by every request this spider issues,
# so the CCTV site serves the same markup it would to a real Chrome client.
# NOTE(review): the hard-coded Cookie values look session-specific — confirm
# they are still required / valid before relying on them.
headers = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  'Accept-Language': 'zh-CN,zh;q=0.9',
  'Cache-Control': 'max-age=0',
  'Connection': 'keep-alive',
  'Cookie': 'sca=d7217d64; cna=dLogHCKr1T0CAXAUk908Mmdc; atpsida=fcf66baa3026d2fa9b02549b_1671023733_2',
  'Referer': 'https://news.cctv.com/',
  'Sec-Fetch-Dest': 'document',
  'Sec-Fetch-Mode': 'navigate',
  'Sec-Fetch-Site': 'same-site',
  'Sec-Fetch-User': '?1',
  'Upgrade-Insecure-Requests': '1',
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
  'sec-ch-ua': '"Not?A_Brand";v="8", "Chromium";v="108", "Google Chrome";v="108"',
  'sec-ch-ua-mobile': '?0',
  'sec-ch-ua-platform': '"Windows"'
}

class CPT(scrapy.Spider):
    """Crawl CCTV search results for '无人航天器' (unmanned spacecraft) news.

    Two-level crawl: ``parse`` reads the search-result listing and schedules
    one detail request per hit; ``parse1`` fills in the article body and
    release time, then yields the completed ``INDUSTRY_NEWSItem``.
    """

    item_name = 'INDUSTRY_NEWSItem'
    name = 'Unmanned_spacecraft_mmq'
    # Search-result page that seeds the crawl; also recorded as each item's
    # 'source' field (was previously duplicated as two identical literals).
    search_url = 'https://search.cctv.com/search.php?qtext=%E6%97%A0%E4%BA%BA%E8%88%AA%E5%A4%A9%E5%99%A8&type=web'
    custom_settings = {
        'DOWNLOAD_DELAY': 1,
        'DOWNLOADER_MIDDLEWARES': {
            # Rotating-proxy middleware: first entry enabled, second kept off.
            'tutorial.middlewares.RandomProxyMiddleware': 101,
            # 'TutorialSpiderMiddleware':200
        }}

    def start_requests(self):
        """Issue the first-level request for the search-result page.

        :return: generator yielding the seed :class:`scrapy.Request`
        """
        # GET is Scrapy's default method, so no explicit method= is needed.
        yield scrapy.Request(url=self.search_url, headers=headers,
                             callback=self.parse, dont_filter=True)

    def parse(self, response, **kwargs):
        """Parse the search-result listing and schedule one detail request per hit.

        :param response: response for :attr:`search_url`
        :return: generator of second-level :class:`scrapy.Request` objects
        """
        tree = etree.HTML(response.text)
        results = tree.xpath('//*[@id="page_body"]/div[5]/div[1]/div[3]/div[3]/div[2]/div[1]/ul/li')
        for entry in results:
            hrefs = entry.xpath('./div/h3/span/a/@href')
            if not hrefs:
                # Malformed listing entry without a link: skip it instead of
                # raising IndexError and aborting the whole page.
                continue
            # Fresh item per result, so iterations can never share state.
            item = INDUSTRY_NEWSItem()
            # The title may be split across several text nodes; join them once
            # (the old manual += loop plus ''.join() was redundant).
            item['title'] = ''.join(entry.xpath('./div/h3/span/a//text()'))  # String: headline
            item['source'] = self.search_url
            item['target'] = parse.urljoin(self.search_url, hrefs[0])
            item['third_class'] = '无人航天器'
            item['category'] = '行业新闻'  # category
            item['industry'] = '航空航天'  # industry
            item['spider_name'] = self.name  # String: spider name
            item['store_at'] = datetime.now().strftime("%Y-%m-%d, %H:%M:%S")  # storage timestamp
            item['item_name'] = self.item_name  # String: item class name
            item['contributor'] = '梅孟祺'  # String: spider author
            # deepcopy so in-flight requests never share one mutable item.
            yield scrapy.Request(url=item['target'], headers=headers,
                                 dont_filter=True, callback=self.parse1,
                                 meta={'items': copy.deepcopy(item)})

    def parse1(self, response):
        """Parse an article detail page: fill in body text and release time.

        :param response: response for one article URL scheduled by :meth:`parse`
        :return: generator yielding the completed item
        """
        page = etree.HTML(response.text)
        item = response.meta.get('items')
        item['content'] = ''.join(page.xpath('//*[@id="content_area"]//text()'))
        item['releaseAt'] = ''.join(page.xpath('//*[@id="title_area"]/div[2]//text()'))
        yield item