# -*- coding: utf-8 -*-

#@File : quick_news_futunn_com.py
#@Software : PyCharm
#@Author : Silva
#@Email : for_billy@163.com
#@Time : 2023/9/30 上午9:54


import re
import math
import scrapy
import json
from urllib.parse import urlsplit
from parsel import  Selector
from ..utils import date, over_page, date2time
from ..items import PostNewsItem, newsItemLoader, QuickNewsItem
from ..package.rules.utils import urljoin
from ..package.rules import TitleRules, PublishDateRules, ContentRules, AuthorExtractor


class NewsFillterSpider(scrapy.Spider):
    """Quick-news spider for newsfilter.io.

    Crawls two static endpoints:
      * an HTML summary page whose ``<ul><li>`` entries each hold one brief, and
      * a JSON briefs feed (list of dicts with ``article``/``text`` fields).

    Each brief is emitted as a ``QuickNewsItem`` via ``newsItemLoader``.
    """

    name = 'newsfilter.io'
    allowed_domains = ['newsfilter.io']
    site_name = 'newsfilter'
    site_type = 'quick'
    language = 'en'
    title_rules = TitleRules()
    publish_date_rules = PublishDateRules()
    author_rules = AuthorExtractor()

    # Crawled per module: one HTML summary page and one JSON briefs feed.
    start_urls = ['https://static.newsfilter.io/landing-page/summary-of-briefs.html']
    breaf_urls = ['https://static.newsfilter.io/landing-page/briefs.json']

    def __init__(self, task_id='', *args, **kwargs):
        """Accept an optional scheduler ``task_id`` and forward the rest to Spider.

        :param task_id: external task identifier, stored for downstream pipelines.
        """
        super().__init__(*args, **kwargs)  # <- important: Spider needs its kwargs
        self.task_id = task_id
        self.site_type = 'quick'

    def start_requests(self):
        """Schedule both the HTML summary page and the JSON briefs feed."""
        for first_url in self.start_urls:
            yield scrapy.Request(first_url, callback=self.parse_first)

        # JSON feed endpoints
        for breaf_url in self.breaf_urls:
            yield scrapy.Request(breaf_url, callback=self.parse_breaf)

    def parse_first(self, response):
        """Parse the HTML summary page: one brief per ``<ul><li>`` text node."""
        news_list = response.xpath('//ul/li/text()').getall()
        for news in news_list:
            # Front-page entries are plain text and parsed individually.
            yield from self.parse_html_detail(news)

    def parse_breaf(self, response):
        """Parse the JSON briefs feed (a list of brief dicts)."""
        breaf_data = response.json()
        for breaf in breaf_data:
            yield from self.parse_breaf_detail(breaf)

    def parse_html_detail(self, new_text):
        """Build a QuickNewsItem from a plain-text brief scraped off the HTML page.

        The summary page exposes no per-brief link, so ``originalUrl`` is empty.
        """
        item = newsItemLoader(item=QuickNewsItem())
        detail_url = ''
        content = f'<p>{new_text}</p>'
        item.add_value('content', content)  # body / text_content
        item.add_value('originalContent', content)
        item.add_value('source', 'NehStock')  # news source label
        item.add_value('originalUrl', detail_url)  # original article url (none available)
        yield item.load_item()

    def parse_breaf_detail(self, breaf):
        """Build a QuickNewsItem from one JSON brief dict.

        Prefers ``article.title`` as the content; falls back to the brief's
        ``text`` field. Missing keys are tolerated (previously a single
        malformed entry raised KeyError and aborted the whole feed parse).
        """
        item = newsItemLoader(item=QuickNewsItem())
        # Guard against briefs without an 'article' object or without a title.
        article = breaf.get('article') or {}
        title = article.get('title')
        content = title if title else breaf.get('text', '')
        content = f'<p>{content}</p>'
        item.add_value('content', content)  # body / text_content
        item.add_value('title', title)  # headline
        item.add_value('originalContent', content)
        item.add_value('source', 'NehStock')  # news source label
        item.add_value('originalUrl', article.get('url', ''))  # original article url
        yield item.load_item()