# -*- coding: utf-8 -*-

# @File : post_cafe_naver_com_likeusstock.py
# @Software : PyCharm
# @Author : Silva
# @Email : for_billy@163.com
# @Time : 2023/9/12 下午10:54

import re
import math
import scrapy
import json
from urllib.parse import urlsplit

from ..utils import date, over_page, date2time, prettify_css_html
from ..items import PostNewsItem, newsItemLoader
from ..package.rules.utils import urljoin
from ..package.rules import TitleRules, PublishDateRules, ContentRules, AuthorExtractor


class CafeNaverComSpider(scrapy.Spider):
    """Spider for the '美股是未来' (likeusstock) Naver cafe.

    Crawls a fixed set of board list pages, rewrites each article link into
    the cafe-web JSON detail API URL, and loads each article into a
    :class:`PostNewsItem` via :class:`newsItemLoader`.
    """
    name = 'cafe.naver.com.likeusstock'
    allowed_domains = ['naver.com']
    site_name = '美股是未来'
    site_type = 'post'
    title_rules = TitleRules()
    publish_date_rules = PublishDateRules()
    author_rules = AuthorExtractor()
    language = 'ko'

    # Numeric club id of this cafe; also embedded in every start URL below.
    CAFE_ID = '28497937'

    # Crawl board by board: [source (level-1 category), subSource (level-2 category), list URL].
    # NOTE: the URL strings are runtime data and must stay exactly as-is.
    start_urls = [
        ["미국주식", "포트폴리오",
         "https://cafe.naver.com/ArticleList.nhn?search.clubid=28497937&search.menuid=49&userDisplay=50&search.boardtype=L&search.specialmenutype=&search.totalCount=501&search.cafeId=28497937&search.page=1"],

        ["미국주식", "실적발표",
         "https://cafe.naver.com/ArticleList.nhn?search.clubid=28497937&search.menuid=104&userDisplay=50&search.boardtype=L&search.specialmenutype=&search.totalCount=501&search.cafeId=28497937&search.page=1"],

        ["미국주식", "분석",
         "https://cafe.naver.com/ArticleList.nhn?search.clubid=28497937&search.menuid=103&userDisplay=50&search.boardtype=L&search.specialmenutype=&search.totalCount=501&search.cafeId=28497937&search.page=1"],

        ["미국주식", "배당",
         "https://cafe.naver.com/ArticleList.nhn?search.clubid=28497937&search.menuid=115&userDisplay=50&search.boardtype=L&search.specialmenutype=&search.totalCount=501&search.cafeId=28497937&search.page=1"],

        ["미국주식", "채권",
         "https://cafe.naver.com/ArticleList.nhn?search.clubid=28497937&search.menuid=139&userDisplay=50&search.boardtype=L&search.specialmenutype=&search.totalCount=501&search.cafeId=28497937&search.page=1"],

        ["미국주식", "선물옵션",
         "https://cafe.naver.com/ArticleList.nhn?search.clubid=28497937&search.menuid=126&userDisplay=50&search.boardtype=L&search.specialmenutype=&search.totalCount=501&search.cafeId=28497937&search.page=1"],

    ]

    def __init__(self, task_id='', *args, **kwargs):
        """Accept an optional external task id and flag that a proxy is needed.

        ``site_type`` is already declared as a class attribute; it does not
        need to be re-assigned here.
        """
        super().__init__(*args, **kwargs)  # keep Spider's own initialization
        self.task_id = task_id
        self.need_proxy = True

    def start_requests(self):
        """Issue one request per board, carrying the category pair in meta."""
        for source, sub_source, url in self.start_urls:
            # source/subSource ride along in meta so parse_detail can tag the item.
            meta = {'source': source, "subSource": sub_source}
            yield scrapy.Request(url, callback=self.parse, meta=meta)

    def transfomate_detail_url(self, detail_url):
        """Rewrite an article-list href into the cafe JSON detail API URL.

        :param detail_url: href of the form ``...articleid=NNN...menuid=MMM...``
        :raises ValueError: if the href lacks an ``articleid`` or ``menuid``.
        :return: ``apis.naver.com`` cafe-articleapi v2.1 URL for the article.
        """
        article_match = re.search(r'articleid=(\d+)', detail_url)
        menu_match = re.search(r'menuid=(\d+)', detail_url)
        if article_match is None or menu_match is None:
            raise ValueError(f'cannot extract articleid/menuid from {detail_url!r}')
        return (
            f'https://apis.naver.com/cafe-web/cafe-articleapi/v2.1/cafes/'
            f'{self.CAFE_ID}/articles/{article_match.group(1)}'
            f'?menuId={menu_match.group(1)}'
        )

    def parse(self, response):
        """Parse one list page: follow every article, then schedule pages 2-9."""
        for td in response.xpath('//td[@class="td_article"]'):
            detail_url = td.xpath('.//div[@class="inner_list"]/a[@class="article"]/@href').get()
            if not detail_url:
                # Row without an article link (e.g. notice/ad markup) — skip it.
                continue
            detail_url = self.transfomate_detail_url(detail_url)
            yield response.follow(detail_url, callback=self.parse_detail, meta=response.meta)

        # Paginate only from the first page so each page is scheduled once.
        if 'search.page=1' in response.url:
            for num in range(2, 10):
                next_page = response.url.replace('search.page=1', f"search.page={num}")
                yield from over_page(next_page, response, page_num=num, callback=self.parse)

    def parse_detail(self, response):
        """Parse one article JSON response into a PostNewsItem.

        Scrapped (re-shared) articles carry their HTML under
        ``article.scrap.contentHtml``; ordinary ones under
        ``article.contentHtml``.
        """
        item = newsItemLoader(item=PostNewsItem(), selector=response, response=response)
        article = response.json()['result']['article']
        if article.get('scrap'):
            contentHtml = article['scrap']['contentHtml']
        else:
            contentHtml = article['contentHtml']
        content = prettify_css_html(contentHtml)  # cleaned text content
        item.add_value('content', content)  # cleaned body text
        item.add_value('originalContent', contentHtml)  # raw post HTML
        item.add_value('source', response.meta['source'])  # level-1 category
        item.add_value('subSource', response.meta['subSource'])  # level-2 category
        item.add_value('originalUrl', response.url)  # original article URL
        return item.load_item()
