# -*- coding: utf-8 -*-
import scrapy
from inter_game_spider import settings
from utils import help
from inter_game_spider.items import InfoItem, IgnoreLinkItem
import logging
logger = logging.getLogger(__name__)


class IbtimesSpiderSpider(scrapy.Spider):
    """Crawl ibtimes.com search results per company code and yield news items.

    For every company code returned by ``help.get_company_code()`` it requests
    up to ``max_pages`` search-result pages, follows each article link, and
    yields an ``InfoItem`` (article data) plus an ``IgnoreLinkItem`` (dedup
    record) per article.
    """

    name = 'ibtimes_spider'
    allowed_domains = ['ibtimes.com']
    # search_word = 'MSFT'
    start_page = 0
    # Default cap: at most 1000 search-result pages per company.
    max_pages = 1000

    base_url = 'http://www.ibtimes.com/search/site/%s?page=%d'

    def start_requests(self):
        """Yield one search-results request per (company code, page) pair."""
        company_name_list = help.get_company_code()
        for company in company_name_list:
            company_code = company[0]
            # Lazy %-style logging args avoid formatting when the level is off.
            logger.info("crawling:%s", company_code)
            for page in range(self.start_page, self.max_pages):
                url = self.base_url % (company_code, page)
                logger.info('crawling: %s', url)
                yield scrapy.Request(url=url, callback=self.parse,
                                     dont_filter=True,
                                     meta={"code": company_code})

    def parse(self, response):
        """Parse one search-results page and schedule a request per article.

        The five XPath extractions are positionally parallel lists. ``zip()``
        keeps them aligned and stops at the shortest one, instead of indexing
        all of them by ``len(links)`` and raising IndexError when a selector
        matches fewer nodes than the links selector does.
        """
        code = response.meta["code"]
        platform = settings.PLATFORM_INTERNATIONAL_BUSINESS_TIME
        titles = response.xpath('//div[@class="info info-flex"]/h3/a/text()').extract()
        links = response.xpath('//div[@class="info info-flex"]/h3/a/@href').extract()
        # News category of each result entry.
        news_types = response.xpath('//div[@class="category"]/a/text()').extract()
        authors = response.xpath('//div[@class="byline hidden-xs"]/span[@class="author"]/a/text()').extract()
        pub_times = response.xpath('//div[@class="byline hidden-xs"]/text()').extract()
        # Drop the "By ..." author fragments and normalise the timestamps.
        pub_times = self.deal_pub_time(pub_times)

        for title, link, news_type, author, pub_time in zip(
                titles, links, news_types, authors, pub_times):
            # urljoin() resolves a site-relative @href against the page URL,
            # so the detail request always gets an absolute URL.
            abs_link = response.urljoin(link)
            # Data carried into the detail-page request.
            meta = {
                "platform": platform,
                "title": title,
                "link": abs_link,
                "news_type": news_type,
                "author": author,
                "pub_time": pub_time,
                "code": code,
            }
            yield scrapy.Request(url=abs_link, callback=self.parse_detail,
                                 dont_filter=True, meta=meta)

    def parse_detail(self, response):
        """Parse an article page; yield an InfoItem and an IgnoreLinkItem."""
        meta = response.meta
        content = ''.join(
            response.xpath('//div[@class="article-body"]/p/text()').extract())
        content = self.deal_str(content)
        create_date = help.get_current_time()

        item = InfoItem()
        item["pub_time"] = meta["pub_time"]
        item["title"] = meta["title"]
        item["link"] = meta["link"]
        item["content"] = content
        item["platform"] = meta["platform"]
        item["author"] = meta["author"]
        item["company"] = meta["code"]
        item["news_type"] = meta["news_type"]
        item["record_type"] = settings.NEWS_TYPE_ARTICLE
        item["create_date"] = create_date
        yield item

        # Record the link so later crawls can skip already-seen articles.
        ignore_link_item = IgnoreLinkItem()
        ignore_link_item["link"] = meta["link"]
        ignore_link_item["create_date"] = create_date
        yield ignore_link_item

    def deal_str(self, data):
        """Remove newline and tab characters from *data*.

        :param data: string to clean, or None
        :return: cleaned string, or None when *data* is None
        """
        if data is None:
            return None
        return data.replace('\n', '').replace('\t', '')

    def deal_pub_time(self, pub_times):
        """Normalise raw byline strings to "YYYY-MM-DD HH:MM" (24-hour).

        Entries containing "By" (author fragments) are skipped. Assumes each
        remaining entry splits on spaces so that token[2] is "MM/DD/YY",
        token[4] is "H:MM" and token[5] is "AM"/"PM" -- TODO confirm against
        the live byline markup.

        :param pub_times: list of raw byline strings, or None
        :return: list of normalised timestamps, or None when input is None
        """
        if pub_times is None:
            return None
        result = []
        for raw in pub_times:
            if "By" in raw:
                continue
            tokens = raw.split(' ')
            month, day, year = tokens[2].split('/')
            # Two-digit year assumed to be 20xx.
            date_part = "20" + year + "-" + month + '-' + day
            hour_str, minute = tokens[4].split(':')
            hour = int(hour_str)
            # Correct 12h -> 24h conversion. The original added 12 to every
            # PM hour, turning "12:30 PM" into "24:30", and left "12:xx AM"
            # as hour 12 instead of 00.
            if tokens[5] == 'PM' and hour != 12:
                hour += 12
            elif tokens[5] == 'AM' and hour == 12:
                hour = 0
            result.append(date_part + ' ' + '%02d:%s' % (hour, minute))
        return result
