# -*- coding: utf-8 -*-
import scrapy
from inter_game_spider import settings
from utils import help
from inter_game_spider.items import InfoItem, IgnoreLinkItem
import logging
import re
import json
logger = logging.getLogger(__name__)


class CnbcSpiderSpider(scrapy.Spider):
    """Crawl CNBC search results for each company code.

    For every company code returned by ``help.get_company_code`` the spider
    walks the paginated search listing, follows each article link, and yields
    an ``InfoItem`` (article record) plus an ``IgnoreLinkItem`` (dedup record)
    per article. Pagination for a company stops at the first empty page.
    """
    name = 'cnbc_spider'
    allowed_domains = ['search.cnbc.com']
    # %s -> search keyword (company code), %d -> 1-based result page number
    base_url = 'https://search.cnbc.com/rs/search/view.html?partnerId=2000&keywords=%s&sort=date&pubtime=0&pubfreq=a&page=%d'
    # first result page requested for every company
    start_page = 1

    def start_requests(self):
        """Seed one search request per company code.

        :return: generator of ``scrapy.Request`` for page ``start_page`` of
            each company, carrying the code and page number in ``meta``.
        """
        for company in help.get_company_code():
            company_code = company[0]
            logger.info("crawling:%s", company_code)
            url = self.base_url % (company_code, self.start_page)
            logger.info('crawling: %s', url)
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True,
                                 meta={"code": company_code, "current_page": self.start_page})

    def parse(self, response):
        """Parse one search-result page.

        Schedules a detail request per result card and then requests the next
        page; an empty page ends pagination for the company.
        """
        current_page = response.meta["current_page"]
        company_code = response.meta["code"]
        titles = response.xpath('//div[@class="SearchResultCard"]/h3[@class="title"]/a/text()').extract()
        links = response.xpath('//div[@class="SearchResultCard"]/h3[@class="title"]/a/@href').extract()
        pub_times = response.xpath('//div[@class="SearchResultCard"]/time/script/text()').extract()
        authors = response.xpath('//div[@class="attribution"]/span[@class="byline"]/text()').extract()
        news_types = response.xpath('//div[@class="attribution"]/span[@class="source"]/text()').extract()

        if not links:
            # No result cards: we walked past the last page for this company.
            # (Was a Python-2 `print` statement; use the module logger instead.)
            logger.info("%s company crawled end....", company_code)
            return

        for i, link in enumerate(links):
            # Optional fields (byline, source, time script) are not present on
            # every card, so index them defensively via deal_array.
            meta = {
                "title": titles[i],
                "link": link,
                "pub_time": self.deal_array(pub_times, i),
                "author": self.deal_array(authors, i),
                "news_type": self.deal_array(news_types, i),
                "platform": settings.PLATFORM_CNBC,
                "record_type": settings.NEWS_TYPE_ARTICLE,
                "code": company_code
            }
            # Links come back protocol-relative ("//www.cnbc.com/...").
            yield scrapy.Request(url="http:" + link, callback=self.parse_detail,
                                 dont_filter=True, meta=meta)

        # Queue the next result page for this company.
        next_page = current_page + 1
        next_url = self.base_url % (company_code, next_page)
        yield scrapy.Request(url=next_url, callback=self.parse, dont_filter=True,
                             meta={"code": company_code, "current_page": next_page})

    def parse_detail(self, response):
        """Parse an article page into an InfoItem plus an IgnoreLinkItem."""
        # Prefer the on-page <time datetime=...> stamp; fall back to the
        # script snippet captured on the listing page (may be None).
        datestamps = response.xpath('//time[@class="datestamp"]/@datetime').extract()
        if datestamps:
            pub_time = self.deal_pubtime(datestamps[0])
        else:
            pub_time = self.deal_pubtime_by_script(response.meta["pub_time"])

        # Article body: concatenation of every <p> text node on the page.
        content = self.deal_str(''.join(response.xpath('//p/text()').extract()))

        create_date = help.get_current_time()
        link = response.meta["link"]

        # Assemble the item sent on to the database pipeline.
        info_item = InfoItem()
        info_item["pub_time"] = pub_time
        info_item["title"] = self.deal_str(response.meta["title"])
        # Stored link is made absolute, same as the request URL above.
        info_item["link"] = "http:" + link
        info_item["content"] = content
        info_item["platform"] = response.meta["platform"]
        info_item["author"] = self.deal_str(response.meta["author"])
        info_item["company"] = response.meta["code"]
        info_item["news_type"] = self.deal_str(response.meta["news_type"])
        info_item["record_type"] = response.meta["record_type"]
        info_item["create_date"] = create_date
        yield info_item

        # Record the raw (protocol-relative) link so it can be skipped later.
        ignore_link_item = IgnoreLinkItem()
        ignore_link_item["link"] = link
        ignore_link_item["create_date"] = create_date
        yield ignore_link_item

    def deal_str(self, data):
        """Strip newline, tab and carriage-return characters.

        :param data: string to clean, or None.
        :return: cleaned string, or None when ``data`` is None.
        """
        if data is None:
            return None
        return data.replace('\n', '').replace('\t', '').replace('\r', '')

    def deal_array(self, arr, i):
        """Return ``arr[i]``, or None when ``i`` is out of range.

        Used for optional per-card fields that may be missing on some
        search-result cards.
        """
        return arr[i] if i < len(arr) else None

    def deal_pubtime(self, pub_time):
        """Normalize a ``datetime`` attribute value.

        Keeps everything before the first '-' and turns 'T' into a space.
        NOTE(review): this truncates a ``YYYY-MM-DD`` style stamp to the
        year — presumably the CNBC attribute uses another separator before
        a '-'-prefixed timezone offset; confirm against a live page.
        """
        return pub_time.split('-')[0].replace('T', ' ')

    def deal_pubtime_by_script(self, pub_time):
        """Extract an epoch timestamp from a listing-page script snippet.

        The snippet is expected to contain two '(' with the timestamp after
        the second one, e.g. ``...write(format(1556712000));``.

        :param pub_time: raw script text, or None when the card had none.
        :return: formatted datetime string, or None when unavailable.
        """
        if pub_time is None:
            # deal_array returns None when the card had no time script;
            # the original code crashed here with AttributeError.
            return None
        parts = pub_time.split('(')
        if len(parts) < 3:
            # Unexpected snippet shape — avoid IndexError on parts[2].
            return None
        raw = parts[2].replace(')', '').replace(';', '')
        return help.timestap_2_datetime(raw)
