# -*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider
import scrapy
import re
from datetime import date


class BaiduSpiderSpider(CrawlSpider):
    """Crawl keyword-filtered news results from news.baidu.com.

    Issues paginated search requests for each keyword, extracts
    [title, source, publish time, url] from every result entry on a
    page, then follows each article link to collect its body text and
    yields one item per article.
    """
    name = "home"
    allowed_domains = ["news.baidu.com"]
    # Kept for backward compatibility; nothing in this chunk fills it —
    # presumably populated elsewhere (pipeline/extension). TODO confirm.
    stopwords = []

    custom_settings = {
        # Baidu answers some result pages with redirects; allow those
        # statuses through to the callbacks instead of dropping them.
        'HTTPERROR_ALLOWED_CODES': [302, 301],
    }

    def start_requests(self):
        """Yield the first 40 result pages (10 hits each) per keyword."""
        print("Get CRON")

        need_titles = ['焦虑']
        for title in need_titles:
            for i in range(40):
                # 'pn' is the zero-based result offset (10 per page).
                will_url = (
                    'https://www.baidu.com/s?tn=news&rtt=4&bsst=1&cl=2&wd='
                    + title
                    + '&medium=0&x_bfe_rqs=03E80&x_bfe_tjscore=0.000000&tngroupname=organic_news&newVideo=12&rsv_dl=news_b_pn&pn='
                    + str(10 * i))
                yield scrapy.Request(will_url, callback=self.parse, dont_filter=True, meta={
                    'key_words': title,
                    'circle': i,  # page index; parse derives result ids from it
                })

    def clean_html_tag(self, text: str) -> str:
        """Strip HTML tags, newlines and ALL spaces from *text*.

        Removing every space is intentional for Chinese text, where
        words are not space-separated. Returns "" for falsy input.
        """
        content = ""
        if text:
            text = text.strip()
            reg = re.compile('<[^>]*>')
            content = reg.sub('', text).replace('\n', '').replace(' ', '')
        return content

    def parse(self, response):
        """Parse one search-result page.

        For each of the 10 entries read [publish time, source, url,
        title] and schedule a request for the article body. Malformed
        entries (missing link or source/time spans) are skipped instead
        of raising IndexError and aborting the whole page, which the
        original indexing did.
        """
        circle = response.meta['circle']
        containers = response.css("div#content_left")
        if not containers:
            # Empty / blocked result page — nothing to extract.
            return
        content = containers[0]
        for i in range(10):
            # Baidu numbers result blocks 1..N continuously across pages.
            page_num = circle * 10 + i + 1
            news_info = content.css('#' + str(page_num))
            title_html = news_info.css("h3 a").extract_first()
            title = self.clean_html_tag(title_html)
            author_relative_time = news_info.css('div.news-source span::text').extract()
            news_url = news_info.css('a::attr(href)').extract_first()
            # Need a link plus both the source ([0]) and the
            # publish-time ([1]) spans; otherwise skip this entry.
            if not news_url or len(author_relative_time) < 2:
                continue
            if "前" in author_relative_time[1]:
                # Relative timestamp (e.g. "3小时前") → use today's date.
                time = date.today()
            else:
                time = author_relative_time[1]
            yield scrapy.Request(news_url, callback=self.news_detail, dont_filter=True, meta={
                'title': title,
                'key_words': response.meta['key_words'],
                'author': author_relative_time[0],
                'time': time,
            })

    def news_detail(self, response):
        """Extract the article body text and yield the final item."""
        p_article = response.css('p::text').extract()
        text_list = []
        for text in p_article:
            text = text.strip()
            if not text:
                continue
            # Normalize in-text separators so the ';'.join below stays
            # unambiguous when the item is later split/parsed.
            text_list.append(text.replace(';', '，').replace(',', '，'))
        yield {
            'url': response.url,
            'title': response.meta['title'],
            'key_words': response.meta['key_words'],
            'author': response.meta['author'],
            'time': response.meta['time'],
            'news_detail': ";".join(text_list),
        }
