#-- coding: utf-8 --

#@File : post_moomoo_com.py
#@Software : PyCharm
#@Author : Silva
#@Email : for_billy@163.com
#@Time : 2023/9/30 上午9:54


import re
import math
import scrapy
import json
from urllib.parse import urlsplit
from parsel import  Selector

from ..utils import date, over_page, date2time, prettify_css_html
from ..items import PostNewsItem, newsItemLoader
from ..package.rules.utils import urljoin
from ..package.rules import TitleRules, PublishDateRules, ContentRules, AuthorExtractor
from ..package.oss_util import OssClient
from ..settings import PAGE_LIMIT

# Module-level OSS client created once at import time (bizType='post').
# NOTE(review): not referenced anywhere in this file — presumably consumed
# by pipelines or sibling modules; confirm before removing.
oss_client = OssClient(bizType='post')

class MoomooSpider(scrapy.Spider):
    """Spider for community posts ("nnq") on www.moomoo.com.

    Flow:
      1. ``parse_first`` extracts the server-rendered ``__INITIAL_STATE__``
         JSON embedded in the landing page HTML.
      2. Each titled feed entry becomes a detail-page request; pagination
         continues through the JSON feed-list endpoint until ``has_more``
         is not 1 or the configured ``PAGE_LIMIT`` is exceeded.
      3. ``parse_detail`` scrapes the post body from each detail page and
         yields a ``PostNewsItem``.
    """

    name = 'www.moomoo.com'
    allowed_domains = ['moomoo.com']
    site_name = 'moomoo'
    site_type = 'post'
    language = 'zh-CHS'
    title_rules = TitleRules()
    publish_date_rules = PublishDateRules()
    author_rules = AuthorExtractor()

    # Crawl by module: the community feed landing page.
    start_urls = ['https://www.moomoo.com/hans/community/nnq']

    # The landing page embeds its state as ``window.__INITIAL_STATE__=<json>;(function...``.
    # Raw string with escaped dots: the original pattern used '\(' inside a
    # non-raw string, which is an invalid escape (DeprecationWarning) and left
    # the dots in ``window.__INITIAL_STATE__`` matching any character.
    _INITIAL_STATE_RE = re.compile(r'window\.__INITIAL_STATE__=(.*?);\(function', re.S)

    # Paginated feed-list endpoint. ``more_mark``/``sequence`` come from the
    # previous page's payload and act as the continuation cursor.
    _FEED_LIST_URL = (
        'https://www.moomoo.com/hans/community/nnq/feed-list'
        '?type=100&num=10&load_list_type=1'
        '&more_mark={more_mark}&sequence={sequence}'
        '&refresh_cycle_info=%7B%22cycle_info%22:%22GNXG1iEY0Jf7IRjw3bgw%22,'
        '%22refresh_cycle%22:0%7D'
    )

    def __init__(self, task_id='', *args, **kwargs):
        """Store the external task id and reset the page counter.

        ``site_type`` is already a class attribute; the old per-instance
        re-assignment was redundant and has been dropped.
        """
        super().__init__(*args, **kwargs)  # <- important
        self.task_id = task_id
        self.page_num = 0

    def start_requests(self):
        """Seed the crawl with the landing page(s)."""
        for first_url in self.start_urls:
            yield scrapy.Request(first_url, callback=self.parse_first)

    def parse_first(self, response):
        """Extract the embedded ``__INITIAL_STATE__`` JSON and dispatch it."""
        match = self._INITIAL_STATE_RE.search(response.text)
        if not match:
            self.logger.warning('no __INITIAL_STATE__ blob found on %s', response.url)
            return
        yield from self.parse_first_page_json(json.loads(match.group(1)))

    def _feed_requests(self, feeds):
        """Yield one detail-page request per titled feed entry.

        Entries without a title are comments, not posts, and are skipped.
        The author's nickname and the title travel in ``meta`` for
        ``parse_detail``.
        """
        for feed in feeds:
            common = feed['common']
            feed_title = common['feed_title']
            if not feed_title:
                # Title-less feeds are comments on other posts; skip.
                continue
            detail_url = (
                'https://www.moomoo.com/hans/community/feed/'
                f"{common['url_slugname']}-{common['feed_id']}"
            )
            meta = {
                'subSource': feed['user_info']['nick_name'],
                'feed_title': feed_title,
            }
            yield scrapy.Request(detail_url, callback=self.parse_detail, meta=meta)

    def _next_page_request(self, page_data):
        """Build the request for the next feed-list page from the cursor fields."""
        url = self._FEED_LIST_URL.format(
            more_mark=page_data['more_mark'],
            sequence=page_data['sequence'],
        )
        return scrapy.Request(url, callback=self.parse_next_page_json)

    def parse_first_page_json(self, page_data):
        """Emit detail requests for page 1 and kick off pagination."""
        feed_data = page_data['feedData']
        yield from self._feed_requests(feed_data['feed'])

        if feed_data['has_more'] != 1:
            return
        self.page_num = 2
        yield self._next_page_request(feed_data)

    def parse_next_page_json(self, response):
        """Handle one paginated feed-list JSON response.

        Bug fix: the original returned on ``has_more != 1`` *before*
        yielding this page's feed requests, silently dropping every post on
        the final page. Feed requests are now emitted first; ``has_more``
        only gates the follow-up pagination request.
        """
        page_data = response.json()
        yield from self._feed_requests(page_data['feed'])

        if page_data['has_more'] != 1:
            return
        if self.page_num > PAGE_LIMIT and PAGE_LIMIT > 0:
            # Honour the configured page cap (PAGE_LIMIT <= 0 disables it).
            return
        self.page_num += 1
        yield self._next_page_request(page_data)

    def parse_detail(self, response):
        """Scrape the post body from a detail page and build the item.

        Returns the loaded ``PostNewsItem``, or ``None`` when the expected
        ``feed-detail`` container is missing (layout change / block page).
        """
        item = newsItemLoader(item=PostNewsItem(), selector=response, response=response)
        feed_detail = response.xpath('//div[@class="feed-detail"]')
        content_text = feed_detail.extract_first()
        if not content_text:
            # Previously this dumped the whole response to ./test.html and
            # print()ed — debug leftovers; a log line is enough.
            self.logger.warning('feed-detail container missing on %s', response.url)
            return
        content = prettify_css_html(content_text)
        item.add_value('content', content)                # cleaned body HTML
        item.add_value('originalContent', content_text)   # raw body HTML
        item.add_value('source', 'MOMO')                  # primary category
        item.add_value('originalUrl', response.url)       # source URL
        item.add_value('images', [])                      # no inline images collected here
        item.add_value('subSource', response.meta['subSource'])  # author nickname
        return item.load_item()
