# -*- coding: utf-8 -*-
import scrapy
from inter_game_spider import settings
from utils import help
from inter_game_spider.items import InfoItem, IgnoreLinkItem
import logging
logger = logging.getLogger(__name__)


class MarketwatchSpiderSpider(scrapy.Spider):
    """Crawl MarketWatch search results for a list of company codes.

    Flow: start_requests -> parse_nums (discover the total result count)
    -> parse (one request per result page) -> parse_detail (extract the
    article body and yield an InfoItem plus an IgnoreLinkItem).
    """
    name = 'marketwatch_spider'
    allowed_domains = ['marketwatch.com']

    # Search endpoint: %s = query, first %d = result offset, second %d = page size.
    base_url = "https://www.marketwatch.com/search?q=%s&o=%d&rpp=%d"
    # Number of results fetched per page.
    batch = 30
    # Offset of the first result.
    start_num = 0

    # Ordered (prefix, number) pairs; substring match accepts both full and
    # abbreviated month names ("April", "Apr.", "July", "Jul", ...).  The
    # previous table used 'Apri' and 'July', which missed the abbreviations.
    _MONTHS = (
        ('Jan', '01'), ('Feb', '02'), ('Mar', '03'), ('Apr', '04'),
        ('May', '05'), ('Jun', '06'), ('Jul', '07'), ('Aug', '08'),
        ('Sep', '09'), ('Oct', '10'), ('Nov', '11'), ('Dec', '12'),
    )

    def start_requests(self):
        """Issue one search request per company code.

        Company codes come from the project helper; the first element of each
        row is assumed to be the code (TODO confirm against
        help.get_company_code()).
        """
        for company in help.get_company_code():
            company_code = company[0]
            logger.info("crawling:%s", company_code)
            url = self.base_url % (company_code, self.start_num, self.batch)
            logger.info('crawling: %s', url)
            yield scrapy.Request(url=url, callback=self.parse_nums,
                                 dont_filter=True, meta={"code": company_code})

    def parse_nums(self, response):
        """Read the total result count and schedule one request per page.

        When the next/prev links are absent the search returned fewer than
        one page of results; fall back to a count of 0 so a single page
        request is still issued (the old code raised IndexError here).
        """
        all_nums = response.xpath('//div[@class="nextprevlinks"]/a/text()').extract()
        nums = self.get_nums(all_nums) if all_nums else 0
        # Floor division: Python 2's int "/" behavior, made explicit and
        # Python 3 safe (true division would make range() fail below).
        times = nums // self.batch + 1
        code = response.meta["code"]
        for i in range(times):
            url = self.base_url % (code, i * self.batch, self.batch)
            logger.info('crawling: %s', url)
            yield scrapy.Request(url=url, callback=self.parse,
                                 dont_filter=True, meta={"code": code})

    def parse(self, response):
        """Extract one search-result page and request each article detail."""
        code = response.meta["code"]
        platform = settings.PLATFORM_MARKET_WATCH
        titles = response.xpath('//div[@class="searchresult"]/a/text()').extract()
        links = response.xpath('//div[@class="searchresult"]/a/@href').extract()
        pub_times = response.xpath('//div[@class="deemphasized"]/span/text()').extract()
        authors = response.xpath('//div[@class="deemphasized"]/text()').extract()
        record_type = settings.NEWS_TYPE_ARTICLE
        # zip() stops at the shortest list, so a ragged page no longer raises
        # IndexError as the old range(len(links)) loop could.
        for title, link, pub_time, author in zip(titles, links, pub_times, authors):
            meta = {
                "platform": platform,
                "title": title,
                "link": link,
                # The page exposes no dedicated category; reuse the author text.
                "news_type": author,
                "author": author,
                "pub_time": pub_time,
                "code": code,
                "record_type": record_type,
            }
            yield scrapy.Request(url=link, callback=self.parse_detail,
                                 dont_filter=True, meta=meta)

    def parse_detail(self, response):
        """Build an InfoItem from the article body plus the search-page meta,
        then yield an IgnoreLinkItem so the link is skipped on later runs."""
        meta = response.meta
        link = meta["link"]
        content = self.deal_str(
            ''.join(response.xpath('//div[@id="article-body"]//p/text()').extract()))
        create_date = help.get_current_time()

        item = InfoItem()
        item["pub_time"] = self.deal_pub_time(meta["pub_time"])
        item["title"] = self.deal_str(meta["title"])
        item["link"] = link
        item["content"] = content
        item["platform"] = meta["platform"]
        item["author"] = self.deal_str(meta["author"])
        item["company"] = meta["code"]
        item["news_type"] = self.deal_str(meta["news_type"])
        item["record_type"] = settings.NEWS_TYPE_ARTICLE
        item["create_date"] = create_date
        yield item

        ignore_link_item = IgnoreLinkItem()
        ignore_link_item["link"] = link
        ignore_link_item["create_date"] = create_date
        yield ignore_link_item

    def deal_str(self, data):
        """Strip newline, tab and carriage-return characters.

        :param data: string to clean, or None
        :return: cleaned string; None passes through unchanged so optional
                 fields are safe to feed in.
        """
        if data is None:
            return None
        for ch in ('\n', '\t', '\r'):
            data = data.replace(ch, '')
        return data

    def deal_pub_time(self, pub_time):
        """Convert a MarketWatch timestamp to "YYYY-MM-DD HH:MM".

        Expected input shape: "5:47 a.m. May 13, 2015".

        Fixes over the previous version: 12 p.m. stays 12 (was rendered as
        24), 12 a.m. becomes 00 (was left as 12), and all hours are
        zero-padded uniformly.

        :param pub_time: raw timestamp string from the search page
        :return: normalized "YYYY-MM-DD HH:MM" string
        """
        parts = pub_time.split(' ')
        hour_str, minute = parts[0].split(':')
        hour = int(hour_str)
        if "a.m." in parts[1]:
            if hour == 12:  # 12 a.m. is midnight
                hour = 0
        else:
            # Anything not marked "a.m." is treated as p.m. (matches the
            # original behavior); 12 p.m. is noon and stays 12.
            if hour != 12:
                hour += 12
        h_m = "%02d:%s" % (hour, minute)

        month = parts[2]
        for prefix, num in self._MONTHS:
            if prefix in month:
                month = num
                break
        else:
            # Unrecognized month: log (the old code used a Py2 print
            # statement) and fall through with the raw text, as before.
            logger.error("error calendar... month: %s", month)
        day = parts[3].replace(',', '')
        return parts[-1] + "-" + month + "-" + day + ' ' + h_m

    def get_nums(self, all_nums):
        """Parse the total result count out of the first next/prev link.

        The third whitespace-separated token of the link text is assumed to
        be the total, with thousands separators removed (e.g. "1,234" ->
        1234) — TODO confirm the exact link wording against a live page.

        :param all_nums: non-empty list of next/prev link texts
        :return: total number of results as int
        """
        total = all_nums[0].split(' ')[2]
        return int(total.replace(',', ''))