from abc import ABC
from urllib.parse import quote_plus

import scrapy

from .news_parser import NewsParser
from .util import extract_context
from ..items import InfoExtractItem

# Baidu News search endpoint; the url-encoded query string is appended to `word=`.
BAIDU_NEWS_SITE = 'https://www.baidu.com/s?ie=utf-8&cl=2&rtt=1&bsst=1&tn=news&word='
# NOTE(review): hard-coded absolute path to one developer's machine — should be
# made configurable (spider argument or Scrapy settings) before running elsewhere.
ORIGIN_DATA_PATH = '/Users/weijinqian/Documents/myarea/spider/info_extract/info_extract/data/rel_test.txt'


class InfoSpider(scrapy.Spider, ABC):
    """Two-stage spider: query Baidu News for keyword groups, then fetch each
    resulting article and emit an :class:`InfoExtractItem` with the text
    surrounding the keywords."""

    name = 'info'

    def __init__(self, name=None, **kwargs):
        super().__init__(name, **kwargs)
        # Parser for the article pages fetched in the second stage.
        self.parser = NewsParser()

    def start_requests(self):
        """
        Two-stage crawl:
        1. Query Baidu News for each keyword group to collect article links.
        2. Follow each article link and extract its content (see callbacks).
        :return: generator of scrapy.Request against the Baidu News site
        """
        # Explicit encoding: previously the platform default was used, which
        # breaks on non-UTF-8 locales; the data file is UTF-8.
        with open(ORIGIN_DATA_PATH, 'r', encoding='utf-8') as rf:
            for line in rf:
                fields = line.strip().split('###')
                # assumes the last two ###-separated fields are non-keyword
                # metadata — TODO confirm against rel_test.txt format
                keywords = fields[:-2]
                if not keywords:
                    # Skip short/empty lines instead of issuing a bogus request.
                    continue
                search_body = '+'.join(quote_plus(wd) for wd in keywords)
                # One keyword group fans out over several result pages
                # (Baidu paginates with pn in steps of 20).
                for page in range(0, 101, 20):
                    url = (BAIDU_NEWS_SITE + search_body
                           + '&tngroupname=organic_news&pn=' + str(page))
                    param = {'url': url,
                             'keyword': ' '.join(keywords)}
                    yield scrapy.Request(url=url, meta=param,
                                         callback=self.baidu_news_parse,
                                         dont_filter=True)

    def baidu_news_parse(self, response):
        """
        Parse a Baidu News result page and follow each article link.
        :param response: a Baidu News search-result page
        :return: generator of scrapy.Request, one per article link
        """
        news_links = response.xpath('//div[@class="result-op c-container xpath-log new-pmd"]')
        for node in news_links:
            link = node.xpath('./@mu').extract_first()
            if not link:
                # Some result blocks carry no @mu attribute; extract_first()
                # returns None and scrapy.Request(url=None) would raise.
                continue
            param = {'url': link,
                     'keyword': response.meta['keyword']}
            yield scrapy.Request(url=link, meta=param,
                                 callback=self.news_parse, dont_filter=True)

    def news_parse(self, response):
        """
        Extract structured fields from an article page and emit an item.
        :param response: the article page response
        :return: generator yielding at most one InfoExtractItem
        """
        data = self.parser.extract_news(response.text)
        if not data:
            return
        item = InfoExtractItem()
        item['keyword'] = response.meta['keyword']
        item['news_url'] = response.meta['url']
        item['news_time'] = data['news_pubtime']
        item['news_date'] = data['news_date']
        item['news_title'] = data['news_title']
        keywords = item['keyword'].split(' ')
        if len(keywords) < 2:
            # extract_context needs two anchor keywords; a single-keyword
            # line would previously crash with IndexError — drop it instead.
            return
        item['news_content'] = '\t'.join(
            extract_context(data['news_content'], keywords[0], keywords[1]))
        yield item
