# -*- coding: utf-8 -*-
import scrapy
from inter_game_spider import settings
from utils import help
from inter_game_spider.items import InfoItem, IgnoreLinkItem
import json
import logging
import re
logger = logging.getLogger(__name__)


class ThestreetSpiderSpider(scrapy.Spider):
    """Crawl company news from thestreet.com.

    For every company code returned by ``help.get_company_code()`` the spider
    walks the site's paginated JSON news API, then follows each story link to
    extract the article body, yielding an ``InfoItem`` per article plus an
    ``IgnoreLinkItem`` used for de-duplication bookkeeping.
    """
    name = 'thestreet_spider'
    # BUG FIX: was 'thestreet_com' (underscore) — not a real domain, so the
    # offsite middleware would have dropped requests to the actual site.
    allowed_domains = ['thestreet.com']
    # %s -> company ticker code, %d -> pagination start offset.
    base_api = "https://www.thestreet.com/quote/%s/details/news?start=%d&type=json"
    page_start_num = 0   # initial pagination offset
    batch_size = 30      # stories per API page (offset increment)

    def start_requests(self):
        """Issue the first news-list request for every company code.

        ``dont_filter=True`` because distinct companies may share URLs the
        dupefilter would otherwise suppress on re-runs.
        """
        company_name_list = help.get_company_code()
        for company in company_name_list:
            # presumably each row is (code, ...) — only the code is used.
            company_code = company[0]
            logger.info("crawling:%s" % company_code)
            url = self.base_api % (company_code, self.page_start_num)
            logger.info('crawling: %s' % url)
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True,
                                 meta={"code": company_code,
                                       "current_num": self.page_start_num})

    def parse(self, response):
        """Parse one JSON list page.

        Yields a detail-page request per story and, while the API still
        advertises a further page, a request for the next list page.
        """
        dic_msg = json.loads(response.body)
        pagination = dic_msg["pagination"]
        # nextPageUrl ends with "...start=<offset>"; comparing that offset
        # with the current one tells us whether another page exists.
        next_num = int(pagination["nextPageUrl"].split('=')[-1])
        current_num = response.meta["current_num"]
        code = response.meta["code"]
        if current_num < next_num:
            # There is a next page — emit one detail request per story.
            for msg in dic_msg["stories"]:
                link = msg["url"]
                if 'http' not in link:
                    # Relative story URL: prefix the site origin.
                    link = 'https://www.thestreet.com' + link
                meta = {
                    "platform": settings.PLATFORM_THE_STREET,
                    "title": msg["headline"],
                    "link": link,
                    "news_type": msg["subcategoryName"],
                    "pub_time": msg["publishDate"],
                    "code": code
                }
                # Follow the article detail page.
                yield scrapy.Request(url=link, callback=self.parse_detail,
                                     dont_filter=True, meta=meta)
            current_num = current_num + self.batch_size
            next_url = self.base_api % (code, current_num)
            # Continue iterating over the list pages.
            yield scrapy.Request(url=next_url, callback=self.parse,
                                 dont_filter=True,
                                 meta={"current_num": current_num, "code": code})
        else:
            # BUG FIX: original was a Python-2 `print` with a bare "%s" and
            # no argument (printed the literal "%s"); log the code instead,
            # consistent with the rest of the module.
            logger.info("...company (%s) crawl end....", code)

    def parse_detail(self, response):
        """Extract one article page into an InfoItem (+ IgnoreLinkItem).

        All list-page fields travel in ``response.meta``; only the body text
        and the author are scraped here.
        """
        meta = response.meta
        pub_time = meta["pub_time"]
        title = meta["title"]
        link = meta["link"]

        content = response.xpath('//div[@class="page-item"]//p/text()').extract()
        content = ''.join(content)
        content = self.deal_str(content)

        platform = meta["platform"]
        author = response.xpath('//a[@class="article__author-link"]/span/text()').extract()
        author = self.deal_author(author)
        company = meta["code"]
        news_type = meta["news_type"]
        record_type = settings.NEWS_TYPE_ARTICLE
        create_date = help.get_current_time()

        item = InfoItem()
        item["pub_time"] = self.deal_pub_time(pub_time)
        item["title"] = self.deal_str(title)
        item["link"] = link
        item["content"] = content
        item["platform"] = platform
        item["author"] = author
        item["company"] = company
        item["news_type"] = self.deal_str(news_type)
        item["record_type"] = record_type
        item["create_date"] = create_date
        yield item

        # Record the link so subsequent runs can skip it.
        ignore_link_item = IgnoreLinkItem()
        ignore_link_item["link"] = link
        ignore_link_item["create_date"] = create_date
        yield ignore_link_item

    def deal_pub_time(self, pub_time):
        """Normalize a publish timestamp.

        Assumes ISO-8601 input like '2019-01-01T10:00:00Z' — TODO confirm
        against the API; strips the 'T' separator and trailing 'Z'.
        """
        pub_time = pub_time.replace("T", ' ')
        pub_time = pub_time.replace("Z", "")
        return pub_time

    def deal_author(self, author):
        """Pick the author name out of the extracted span texts.

        The author byline xpath yields a list; the second element is used
        (presumably the first span is a label — verify against the page).
        Returns None when fewer than two elements were extracted.
        """
        if len(author) < 2:
            return None
        return self.deal_str(author[1])

    def deal_str(self, data):
        """Strip newline, tab and carriage-return characters from *data*.

        Returns None unchanged when *data* is None.
        """
        if data is None:
            return None
        data = data.replace('\n', '')
        data = data.replace('\t', '')
        data = data.replace('\r', '')
        return data