# -*- coding: utf-8 -*-
import scrapy
from inter_game_spider import settings
from utils import help
from inter_game_spider.items import InfoItem, IgnoreLinkItem
import logging
import re
import json
logger = logging.getLogger(__name__)


class MarketrealistSpiderSpider(scrapy.Spider):
    """Crawl article search results from marketrealist.com's JSON API.

    Flow: ``start_requests`` issues one probe request per company code,
    ``parse_nums`` reads the total post count, ``parse`` fans out one
    request per result page, and ``parse_list`` yields an ``InfoItem``
    and an ``IgnoreLinkItem`` per post.
    """

    name = 'marketrealist_spider'
    allowed_domains = ['marketrealist.com']
    # Placeholders: search text, page size (limit), page number.
    base_url = 'https://api.marketrealist.com/api/search/posts?text=%s&limit=%d&page=%d'
    # Number of posts requested per page.
    batch = 10
    # First page number used for the initial probe request.
    start_num = 1

    # Hoisted out of filter_html so the pattern is compiled once, not per call.
    _TAG_RE = re.compile(r'<[^>]+>', re.S)
    # Translation table for deal_str: removes \n, \t and \r in one C-level pass.
    _WS_TABLE = str.maketrans('', '', '\n\t\r')

    def start_requests(self):
        """Yield one probe request per company code to discover result counts."""
        company_name_list = help.get_company_code()
        for company in company_name_list:
            # Each row's first field is the company code used as search text.
            company_code = company[0]
            logger.info("crawling:%s", company_code)
            url = self.base_url % (company_code, self.batch, self.start_num)
            logger.info('crawling: %s', url)
            yield scrapy.Request(url=url, callback=self.parse_nums, dont_filter=True,
                                 meta={"code": company_code})

    def parse_nums(self, response):
        """Read the total post count and schedule paging when needed.

        :param response: JSON API response containing ``posts_count``.
        :return: generator yielding a follow-up :class:`scrapy.Request`
                 when there is more than one page of results.
        """
        msg = json.loads(response.body)
        post_counts = msg["posts_count"]
        # Total number of pages. Integer floor division fixes the Python 3
        # ``/`` bug where ``pages`` became a float and broke range() in
        # parse(). ``// batch + 1`` keeps the original (Python 2) semantics,
        # including the harmless extra page when counts divide evenly.
        pages = post_counts // self.batch + 1
        company_code = response.meta["code"]
        if pages < 2:
            # NOTE(review): companies with fewer than `batch` posts are
            # skipped entirely here (their single page is never parsed) —
            # preserved from the original; confirm this is intended.
            logger.info("%s company has no pages...", company_code)
        else:
            # Rebuild the first-page URL and hand off to parse() for fan-out.
            url = self.base_url % (company_code, self.batch, self.start_num)
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True,
                                 meta={"code": company_code, "pages": pages})

    def parse(self, response):
        """Fan out one list request per result page (pages are 1-based)."""
        pages = response.meta["pages"]
        company_code = response.meta["code"]
        for page in range(1, pages + 1):
            url = self.base_url % (company_code, self.batch, page)
            yield scrapy.Request(url=url, callback=self.parse_list, dont_filter=True,
                                 meta={"code": company_code})

    def parse_list(self, response):
        """Yield an InfoItem plus an IgnoreLinkItem for every post on the page.

        :param response: JSON API response whose ``posts`` key holds the
                         list of article records.
        """
        msg = json.loads(response.body)
        items = msg["posts"]
        company_code = response.meta["code"]
        for item in items:
            create_date = help.get_current_time()

            # Package the article fields for the database pipeline.
            info_item = InfoItem()
            info_item["pub_time"] = item["published_date"]
            info_item["title"] = self.deal_str(item["title"])
            info_item["link"] = item["link"]
            # Strip HTML tags first, then collapse whitespace control chars.
            info_item["content"] = self.deal_str(self.filter_html(item["content"]))
            info_item["platform"] = settings.PLATFORM_MARKETREALIST
            info_item["author"] = self.deal_str(item["author"]["name"])
            info_item["company"] = company_code
            info_item["news_type"] = self.deal_str(item["slug"])
            info_item["record_type"] = settings.NEWS_TYPE_ARTICLE
            info_item["create_date"] = create_date
            yield info_item

            # Record the link so it is skipped on future crawls.
            ignore_link_item = IgnoreLinkItem()
            ignore_link_item["link"] = item["link"]
            ignore_link_item["create_date"] = create_date
            yield ignore_link_item

    def filter_html(self, html):
        """Strip HTML tags from *html*, returning plain text.

        :param html: markup string; tags are removed, text content kept.
        :return: the input with every ``<...>`` span deleted.
        """
        return self._TAG_RE.sub('', html)

    def deal_str(self, data):
        """Remove newline, tab and carriage-return characters from *data*.

        :param data: string to clean, or ``None``.
        :return: cleaned string, or ``None`` if *data* is ``None``.
        """
        if data is None:
            return None
        return data.translate(self._WS_TABLE)