#-- coding: utf-8 --

#@File : post_q_futunn_com.py
#@Software : PyCharm
#@Author : Silva
#@Email : for_billy@163.com
#@Time : 2023/9/30 上午9:54


import re
import math
import uuid

import requests
import scrapy
import json

from ..utils import date, over_page, date2time, prettify_css_html
from ..items import PostNewsItem, newsItemLoader
from ..settings import PAGE_LIMIT
from ..package.rules.utils import urljoin
from ..package.rules import TitleRules, PublishDateRules, ContentRules, AuthorExtractor


class Qfutunn(scrapy.Spider):
    """Spider for the Futu NiuNiu community post feed (q.futunn.com).

    Crawls the /nnq landing page, extracts the JSON state embedded in the
    HTML, follows pagination through the feed-list JSON endpoint, and yields
    one ``PostNewsItem`` per titled post's detail page.
    """

    name = 'q.futunn.com'
    allowed_domains = ['q.futunn.com']
    site_name = '富途牛牛-帖子'
    site_type = 'post'
    language = 'zh-CHS'
    title_rules = TitleRules()
    publish_date_rules = PublishDateRules()
    author_rules = AuthorExtractor()

    # Crawl by module: the community feed landing page.
    start_urls = ['https://q.futunn.com/nnq']

    # Bootstrap JSON the site embeds in the landing page HTML.
    _INITIAL_STATE_RE = re.compile(r'window\.__INITIAL_STATE__=(.*?);\(function', re.S)

    def __init__(self, task_id='', *args, **kwargs):
        super().__init__(*args, **kwargs)  # <- important
        self.task_id = task_id
        # Pages fetched so far; compared against PAGE_LIMIT during pagination.
        # (site_type is already set as a class attribute above.)
        self.page_num = 0

    def start_requests(self):
        for first_url in self.start_urls:
            yield scrapy.Request(first_url, callback=self.parse_first)

    def parse_first(self, response):
        """Extract the embedded __INITIAL_STATE__ JSON from the landing page."""
        match = self._INITIAL_STATE_RE.search(response.text)
        if not match:
            # Usually an expired cookie or an error page; log instead of print.
            self.logger.warning('Cookie expired or page error: %s', response.url)
            return
        yield from self.parse_first_page_json(json.loads(match.group(1)))

    @staticmethod
    def _next_page_url(more_mark, sequence):
        """Build the paginated feed-list API URL from the pagination cursor."""
        return (
            'https://q.futunn.com/nnq/feed-list?type=100&num=10&load_list_type=1'
            f'&more_mark={more_mark}&sequence={sequence}'
            '&refresh_cycle_info=%7B%22cycle_info%22:%22%22,%22refresh_cycle%22:0%7D'
        )

    def _feed_requests(self, feeds):
        """Yield one detail-page request per titled feed entry.

        Entries without a title are comments, not posts, and are skipped.
        """
        for feed in feeds:
            feed_id = feed['common']['feed_id']
            feed_title = feed['common']['feed_title']
            if not feed_title:
                continue
            nick_name = feed['user_info']['nick_name']
            detail_url = (
                f'https://q.futunn.com/feed/{feed_id}'
                '?global_content=%7B%22promote_id%22%3A13766%2C%22sub_promote_id%22%3A38%7D'
            )
            meta = {"subSource": nick_name, 'feed_title': feed_title}
            yield scrapy.Request(detail_url, callback=self.parse_detail, meta=meta)

    def parse_first_page_json(self, page_data):
        """Schedule detail requests for page 1 and kick off pagination.

        BUG FIX: the original overwrote every computed detail_url with a single
        hard-coded debug URL, so only one fixed post was ever crawled here.
        """
        feed_data = page_data['feedData']
        yield from self._feed_requests(feed_data['feed'])

        # Pagination cursor for the next page.
        if feed_data['has_more'] != 1:
            return
        self.page_num = 2
        yield scrapy.Request(
            self._next_page_url(feed_data['more_mark'], feed_data['sequence']),
            callback=self.parse_next_page_json,
        )

    def parse_next_page_json(self, response):
        """Handle one paginated feed-list JSON response.

        BUG FIX: the original returned on ``has_more != 1`` before processing
        the page's feeds, silently dropping every post on the final page.
        Feeds are now yielded first; ``.get`` guards cover the last page,
        where the pagination keys may be absent.
        """
        next_page_data = response.json()
        yield from self._feed_requests(next_page_data.get('feed') or [])

        if next_page_data.get('has_more') != 1:
            return
        # Honor the configured page limit when it is positive.
        if PAGE_LIMIT > 0 and self.page_num > PAGE_LIMIT:
            return
        self.page_num += 1
        yield scrapy.Request(
            self._next_page_url(next_page_data['more_mark'], next_page_data['sequence']),
            callback=self.parse_next_page_json,
        )

    def parse_detail(self, response):
        """Parse a post detail page into a PostNewsItem."""
        item = newsItemLoader(item=PostNewsItem(), selector=response, response=response)
        feed_detail = response.xpath('//div[@class="feed-detail"]')
        content_text = feed_detail.extract_first()  # raw post HTML
        content = prettify_css_html(content_text)
        item.add_value('content', content)  # cleaned body HTML
        item.add_value('originalContent', content_text)  # raw body HTML
        item.add_value('source', '富途牛牛')  # primary category
        item.add_value('originalUrl', response.url)  # original post URL
        item.add_value('images', [])  # no images extracted on this page
        item.add_value('subSource', response.meta['subSource'])  # author nickname
        return item.load_item()


