import scrapy
from scrapy.loader import ItemLoader
import traceback
import logging
from scrapy import signals
import datetime
from scrapy.utils.project import get_project_settings

from week1.items import BaiduItem
from week1.utils import handel_datetime
# Module-level logger named after this module (instead of the root logger),
# so log records are attributable to this spider; output still propagates
# to root handlers under the default configuration.
logger = logging.getLogger(__name__)


class Day01Spider(scrapy.Spider):
    """Crawl Baidu News search results for a fixed set of keywords.

    For each keyword the spider walks the paginated search results, follows
    every article published within the last ``crawl_date`` days to its detail
    page, and yields the completed item. Pagination stops as soon as a result
    older than the time window is seen (results are date-ordered — assumption
    based on the early ``break``; confirm against the site).
    """

    name = 'day01'
    # Spider-local overrides of the project settings (Baidu News).
    custom_settings = {
        'DOWNLOAD_DELAY' : 0.01,  # download delay between requests
        'COOKIES_ENABLED' : False,  # use the cookie defined in settings
        "ITEM_PIPELINES": {'week1.pipelines.BDuPipeline': 10},  # pipeline; lower value = higher priority
        "DOWNLOADER_MIDDLEWARES": {'week1.middlewares.day01DownloaderMiddleware': 543},  # downloader middleware
    }

    def __init__(self, crawl_date=3, *args, **kwargs):
        """Initialize spider state.

        Args:
            crawl_date: size of the crawl window in days (default 3); only
                articles published within this many days are followed.
        """
        # Bug fix: forward *args as well — the original dropped positional
        # arguments before they reached the base class.
        super().__init__(*args, **kwargs)
        self.date = int(crawl_date)
        # Keywords whose Baidu News results will be crawled.
        self.park_set = ['中国', '明天']
        # Static request headers used for every search-page request.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Host": "www.baidu.com",
            "Upgrade-Insecure-Requests": "1",
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/58.0.3029.110 Safari/537.36'
        }

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Build the spider and wire up signal handlers for stats reporting."""
        spider = super().from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.item_scraped, signal=signals.item_scraped)
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        crawler.signals.connect(spider.engine_started, signal=signals.engine_started)
        return spider

    def engine_started(self):
        """Signal handler for ``engine_started``.

        Bug fix: ``from_crawler`` connected ``self.engine_started`` but the
        method was never defined, raising ``AttributeError`` before the crawl
        could start.
        """
        logger.info("Scrapy engine started.")

    def start_requests(self):
        """Yield the initial search-page request for every keyword."""
        for park_name in self.park_set:
            try:
                logger.info(f"开始爬取《{park_name}》的百度新闻。")
                # Initial Baidu News search URL for this keyword.
                url = f'https://www.baidu.com/s?rtt=4&bsst=1&cl=2&tn=news&ie=utf-8&word={park_name}'
                yield scrapy.Request(
                    url=url,
                    callback=self.parse,
                    meta={
                        "keyword": park_name
                    },
                    headers=self.headers,
                    dont_filter=True  # search pages do not take part in dupe filtering
                )
                logger.debug(f"开始请求《{park_name}》的百度新闻搜索页。")
            except Exception:
                # Include the traceback so failures are diagnosable
                # (the original discarded the exception entirely).
                logger.warning(f"获取《{park_name}》的百度新闻搜索结果页面时出错。", exc_info=True)

    def parse(self, response):
        """Parse one search-result page.

        Yields a detail-page request for each result inside the time window
        and, while every result on the page is still inside the window, a
        request for the next results page.
        """
        if response.status == 200:
            self.crawler.stats.inc_value('success_url')
        # Data handed over by the previous request.
        keyword = response.meta['keyword']
        # Becomes False once an article older than ``self.date`` days is seen.
        is_crawl = True
        logger.info(f'开始解析《{keyword}》的搜索结果页面。')
        try:
            # One <div> per news result on the current page.
            result_list = response.xpath("//div[@class='result-op c-container xpath-log new-pmd']")
            if result_list:
                for result_item in result_list:
                    # Extract title, source, publish time and article URL.
                    loader = ItemLoader(item=BaiduItem(), selector=result_item)
                    loader.add_xpath('title', 'string(./div/h3/a)')
                    loader.add_xpath('source', './/span[@class="c-color-gray c-font-normal c-gap-right"]'
                                               '/text()')
                    loader.add_xpath('datetime', './/span[@class="c-color-gray2 c-font-normal"]/text()')
                    loader.add_xpath('x_href', './div/h3/a/@href')
                    loader.add_value('keyword', keyword)
                    articleInfo = loader.load_item()

                    # Normalized datetime when inside the window, falsy otherwise
                    # (semantics of handel_datetime.get_datetime — TODO confirm).
                    flag = handel_datetime.get_datetime(articleInfo['datetime'], self.date)
                    if flag:
                        articleInfo['datetime'] = flag
                        yield scrapy.Request(articleInfo['x_href'],
                                             callback=self.parse_data,
                                             meta={'item': articleInfo,
                                                   "keyword": keyword,
                                                   },
                                             dont_filter=True)
                        logger.info(f"开始请求《{keyword}》中标题为《{articleInfo['title']}》的新闻详情页。")
                    else:
                        # First stale result: stop paginating for this keyword.
                        is_crawl = False
                        logger.info(f"《{keyword}》最近{self.date}天新闻爬取完毕。")
                        break
        except Exception:
            # Consistency fix: route through the logger (with traceback)
            # instead of print + traceback.print_exc().
            logger.warning(f'解析{keyword}搜索页面出现异常', exc_info=True)

        # Only paginate while every result seen so far was inside the window.
        if is_crawl:
            # Text of the last pager link; present only when a next page exists.
            next_un = response.xpath('//div[@class="page-inner"]/a[last()]/text()').extract_first()
            if next_un:
                next_url = 'https://www.baidu.com' + response.xpath('//div[@class="page-inner"]/a[last()]/@href').extract_first()
                yield scrapy.Request(url=next_url,
                                     callback=self.parse,
                                     headers=self.headers,
                                     meta={
                                         "keyword": keyword
                                     },
                                     dont_filter=True
                                     )
            else:
                logger.info(f"《{keyword}》的搜索结果没有下一页，搜索页爬取完成。")
        else:
            logger.info(f"{self.date}天内的{keyword}的百度新闻搜索页爬取完毕")

    def parse_data(self, response):
        """Parse an article detail page and return the completed item.

        The item is returned only when its content is non-empty and either
        the title or the content actually mentions the keyword; otherwise
        the page is logged and dropped.
        """
        keyword = response.meta['keyword']
        articleInfo = response.meta['item']
        title = articleInfo.get('title', '此标题为空')
        logger.info(f"开始解析《{keyword}》标题为《{title}》的新闻详情页。")
        try:
            # All paragraph text on the page becomes the article body.
            content_list = response.xpath('//p//text()').extract()
        except Exception:
            # Bug fix in the message: balanced 《》 around the keyword.
            logger.warning(f"标题为《{title}》的《{keyword}》的百度新闻不能正常提取。", exc_info=True)
            logger.warning(f'不能正常提取：{response}')
            return

        loader = ItemLoader(item=articleInfo)
        loader.add_value('content', content_list)
        articleInfo = loader.load_item()
        content = articleInfo.get('content', '')

        if not content:
            logger.info(f"《{keyword}》的百度新闻内容为空!。")
            logger.info(f'内容为空:{response}')
        elif keyword in title or keyword in content:
            return articleInfo

    def item_scraped(self, item, spider):
        """Signal handler: count each item that was successfully scraped."""
        self.crawler.stats.inc_value('url')

    def spider_closed(self, spider):
        """Signal handler: report final crawl statistics on shutdown."""
        # Default to 0 so an empty crawl reports '0' instead of 'None'.
        logger.info('成功获取:' + str(self.crawler.stats.get_value('success_url', 0)) + '页数据')
        logger.info('百度新闻一共成功抓取到{}条数据'.format(self.crawler.stats.get_value('url', 0)))

