# -*- coding: utf-8 -*-

#@File : quick_news_futunn_com.py
#@Software : PyCharm
#@Author : Silva
#@Email : for_billy@163.com
#@Time : 2023/9/30 上午9:54


import re
import math
import scrapy
import json
from urllib.parse import urlsplit
from parsel import  Selector

from ..settings import PAGE_LIMIT
from ..utils import date, over_page, date2time
from ..items import PostNewsItem, newsItemLoader, QuickNewsItem
from ..package.rules.utils import urljoin
from ..package.rules import TitleRules, PublishDateRules, ContentRules, AuthorExtractor


class NewsFutunnSpider(scrapy.Spider):
    """Spider for the Futunn (富途牛牛) quick-news flash feed.

    Crawls the HTML live page once to pick up the embedded first batch of
    flash items, then follows the JSON pagination API
    (``get-flash-list``) using the ``seqMark`` cursor found in the page's
    inline JavaScript.
    """

    name = 'news.futunn.com'
    allowed_domains = ['futunn.com']
    site_name = '富途牛牛-快讯'
    site_type = 'quick'
    language = 'zh-CHS'
    title_rules = TitleRules()
    publish_date_rules = PublishDateRules()
    author_rules = AuthorExtractor()

    # Crawl by module: the live flash-news feed.
    start_urls = ['https://news.futunn.com/main/live?lang=zh-cn']

    # Pre-compiled cursor pattern. Raw string avoids the invalid-escape
    # DeprecationWarning that a plain '\w' literal triggers.
    _SEQ_MARK_RE = re.compile(r'seqMark:"(\w+)')

    def __init__(self, task_id='', *args, **kwargs):
        super().__init__(*args, **kwargs)  # <- important
        self.task_id = task_id
        # NOTE: the old `self.site_type = 'quick'` re-assignment was
        # redundant with the identical class attribute and was dropped.
        self.page_num = 1  # 1-based counter of JSON pages fetched

    def start_requests(self):
        """Issue the initial request for each start URL."""
        for first_url in self.start_urls:
            yield scrapy.Request(first_url, callback=self.parse_first)

    def parse_first(self, response):
        """Parse the HTML live page, then bootstrap JSON pagination."""
        # First-page items are embedded directly in the HTML and are
        # parsed separately from the JSON pages.
        for news_url_a in response.xpath('//div[@class="flash-info"]/a'):
            yield from self.parse_html_detail(news_url_a)
        # The pagination cursor lives in the page's inline JS. Guard the
        # lookup: the old `findall(...)[0]` raised IndexError when the
        # site markup changed and the cursor was missing.
        match = self._SEQ_MARK_RE.search(response.text)
        if match is None:
            self.logger.warning('seqMark cursor not found on %s', response.url)
            return
        seq_mark = match.group(1)
        next_page_url = f'https://news.futunn.com/news-site-api/main/get-flash-list?pageSize=50&seqMark={seq_mark}'
        yield scrapy.Request(next_page_url, callback=self.parse_next_page_json)

    def parse_html_detail(self, news_url_a):
        """Build a QuickNewsItem from one <a> node of the HTML live page."""
        item = newsItemLoader(item=QuickNewsItem())
        detail_url = news_url_a.xpath('./@href').get()
        detail_url = urljoin('https://news.futunn.com/flash/15641413', detail_url)
        h3_text = news_url_a.xpath('./h3/text()').get()
        p_text = news_url_a.xpath('./p/text()').get()
        # Prefer the headline text; fall back to the paragraph body.
        content = f'<p>{h3_text or p_text}</p>'
        item.add_value('content', content)  # body / text_content
        item.add_value('originalContent', content)
        item.add_value('source', 'NehStock')  # news source tag
        item.add_value('originalUrl', detail_url)  # original article url
        yield item.load_item()

    def parse_next_page_json(self, response):
        """Parse one JSON page of flash news and follow the cursor."""
        # The API nests the useful payload two levels deep; hoist it once.
        payload = response.json()['data']['data']
        for news in payload['news']:
            yield from self.parse_detail(news)

        # Stop once the configured page limit is exceeded
        # (PAGE_LIMIT <= 0 means unlimited).
        if 0 < PAGE_LIMIT < self.page_num:
            return
        self.page_num += 1
        if payload['hasMore']:
            # Follow the cursor to the next page.
            next_page_url = f'https://news.futunn.com/news-site-api/main/get-flash-list?pageSize=50&seqMark={payload["seqMark"]}'
            yield scrapy.Request(next_page_url, callback=self.parse_next_page_json)

    def parse_detail(self, news):
        """Build a QuickNewsItem from one JSON flash-news record."""
        item = newsItemLoader(item=QuickNewsItem())
        title = news['title']
        # Flash entries may carry only a title; fall back to the body.
        content = f"<p>{title or news['content']}</p>"
        item.add_value('content', content)  # body / text_content
        item.add_value('title', title)  # headline
        item.add_value('originalContent', content)
        item.add_value('source', 'NehStock')  # news source tag
        item.add_value('originalUrl', news['detailUrl'])  # original article url

        yield item.load_item()
