#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from locale import *
import logging
import logging.config

import scrapy
from scrapy import signals

from tutorial.items import AmazonCommentId


class baseamazon(scrapy.Spider):
    """Base spider: sets up file-based logging and provides signal callbacks.

    Subclasses inherit the configured ``self.logger`` (see :meth:`init`) and
    the signal-handler methods below, each of which only logs that the
    corresponding Scrapy signal fired.
    """

    # Placeholder; replaced with a configured Logger instance in init().
    logger = ''

    def __init__(self, category=None, *args, **kwargs):
        """Initialise the spider and configure logging.

        ``start_urls`` is consumed by ``start_requests``; when that method is
        overridden and must accept parameters, they have to be accepted here
        in ``__init__`` (spider arguments arrive as keyword args).

        :param category: accepted for spider-argument compatibility; unused here
        """
        print("**********baseamazon spider __init__***********")
        super(baseamazon, self).__init__(*args, **kwargs)
        self.init()

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Standard Scrapy factory hook; logs and delegates to the parent."""
        print("**********base spider from_crawler***********")
        spider = super(baseamazon, cls).from_crawler(crawler, *args, **kwargs)
        return spider

    def spider_opened(self):
        """Callback for ``signals.spider_opened``; logs only."""
        logging.info("HooksasyncExtension, signals.spider_opened fired")

    def spider_idle(self):
        """Callback for ``signals.spider_idle``; logs only."""
        logging.info("HooksasyncExtension, signals.spider_idle fired")

    def spider_closed(self, reason):
        """Callback for ``signals.spider_closed``; logs only."""
        logging.info("HooksasyncExtension, signals.spider_closed fired")

    def spider_error(self, failure, response):
        """Callback for ``signals.spider_error``; logs only."""
        logging.info("HooksasyncExtension, signals.spider_error fired")

    def request_scheduled(self, request):
        """Callback for ``signals.request_scheduled``; logs only."""
        logging.info("HooksasyncExtension, signals.request_scheduled fired")

    def response_received(self, response, request):
        """Callback for ``signals.response_received``; logs only."""
        logging.info("HooksasyncExtension, signals.response_received fired")

    def response_downloaded(self, response, request):
        """Callback for ``signals.response_downloaded``; logs only."""
        logging.info("HooksasyncExtension, signals.response_downloaded fired")

    def item_scraped(self, item, response):
        """Callback for ``signals.item_scraped``; logs only."""
        logging.info("HooksasyncExtension, signals.item_scraped fired")

    def item_dropped(self, item, response, exception):
        """Callback for ``signals.item_dropped``; logs only."""
        logging.info("HooksasyncExtension, signals.item_dropped fired")

    def init(self):
        """Load the logging configuration and bind ``self.logger``.

        Reads ``<parent-of-spider-dir>/config/logging.conf`` and installs it
        via ``logging.config.fileConfig``.
        """
        # Build "<this file's parent dir>/config/logging.conf" portably
        # instead of splitting and re-joining on os.sep by hand.
        here = os.path.abspath(os.path.dirname(__file__))
        config_path = os.path.join(os.path.dirname(here), 'config', 'logging.conf')
        logging.config.fileConfig(config_path)
        # NOTE(review): recent Scrapy versions expose Spider.logger as a
        # read-only property; this assignment assumes an older Scrapy — verify.
        self.logger = logging.getLogger("root")








class amazonCommentSpider(baseamazon):
    """Scrapes Amazon.com product reviews for a given ASIN.

    Invoke with the ASIN as a spider argument, e.g.::

        scrapy crawl amazonCommentSpiderUs -a asin=B001QA4TQO

    ``parse`` handles the first review page (and schedules the remaining
    pages); ``parseOtherPage`` handles pages 2..N.
    """

    name = "amazonCommentSpiderUs"
    custom_settings = {
        'ITEM_PIPELINES': {'tutorial.pipelines.TutorialPipeline': 300},
        'DOWNLOADER_MIDDLEWARES': {'tutorial.middlewares.ProxyMiddleware': 100, }
    }

    def __init__(self, category=None, *args, **kwargs):
        """Initialise the spider.

        ``start_urls`` is consumed by ``start_requests``; when that method is
        overridden and must accept parameters, they have to be accepted here
        in ``__init__``.

        :param category: accepted for spider-argument compatibility; unused here
        """
        print("**********spider __init__***********")
        super(amazonCommentSpider, self).__init__(*args, **kwargs)

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Standard Scrapy factory hook; logs and delegates to the parent."""
        print("**********spider from_crawler***********")
        spider = super(amazonCommentSpider, cls).from_crawler(crawler, *args, **kwargs)
        return spider

    def start_requests(self):
        """Issue the first review-list request for the ``asin`` spider argument.

        Logs an error and produces no requests when ``asin`` was not supplied.
        """
        print("**********start_requests***********")
        self.channel = 'Amazon.com'
        asin = getattr(self, 'asin', None)
        if asin is None:
            self.logger.error("asin is null, no request will produce")
            return
        # Example ASIN: B001QA4TQO
        url = "https://www.amazon.com/product-reviews/%s/ref=cm_cr_pr_viewopt_sr?ie=UTF8&showViewpoints=1&sortBy=recent&reviewerType=all_reviews&formatType=current_format&pageNumber=1" % asin
        self.logger.info(url)
        yield scrapy.Request(url, self.parse, headers={
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3192.0 Safari/537.36",
        })

    def parse(self, response):
        """Handle the first review page.

        Dumps the raw page to ``Spider.html`` for debugging, computes the
        total page count, schedules requests for pages 2..N, and yields an
        item per review found on this page.
        """
        print("***********parse******************")
        # Debug dump of the raw response body.
        with open("Spider.html", 'wb') as f:
            f.write(response.body)
        if not response.body:
            self.logger.info("have a url request is null: %s " % response.url)
        asin = getattr(self, 'asin', None)
        reviewlist = response.css('div#cm_cr-review_list')
        # Total review count from text like "Showing 1-10 of 1,234 reviews".
        counts = reviewlist.css('div.a-section')[0] \
            .css("span.a-size-base")[0] \
            .css("span.a-size-base::text").re(r'of(.*)reviews')
        # Strip the thousands separator directly instead of relying on
        # locale.atof with the Windows-only locale name 'English_US'
        # (which raises locale.Error on Linux).
        total_reviews = int(counts[0].strip().replace(',', ''))
        # 10 reviews per page; ceiling division avoids both the Py3 float
        # result of "/" and a trailing empty page when total divides evenly.
        pagenumber = (total_reviews + 9) // 10
        print(pagenumber)  # Py3-compatible (was the Py2 print statement)
        # range(2, N + 1): first page is handled right here.
        for page in range(2, pagenumber + 1):
            url = "https://www.amazon.com/product-reviews/%s/ref=cm_cr_pr_viewopt_sr?ie=UTF8&showViewpoints=1&sortBy=recent&reviewerType=all_reviews&formatType=current_format&pageNumber=%d" % (
                asin, page)
            yield scrapy.Request(url=url, callback=self.parseOtherPage)
        for review in reviewlist.css('div[data-hook="review"]'):
            try:
                rows = review.css('div.a-row')  # each review is split into rows
                amazoncomment = self.cssGetDate(asin, rows)
                self.logger.info("have a url get date: %s " % response.url)
                yield amazoncomment
            except Exception:
                # The original `raise ("please check this")` raised a plain
                # string (itself a TypeError) and discarded the real error;
                # log it and re-raise instead.
                self.logger.exception("review extraction failed: %s" % response.url)
                raise

    def parseOtherPage(self, response):
        """Handle a follow-up review page (page >= 2); yields one item per review."""
        if not response.body:
            self.logger.info("have a url request is null: %s " % response.url)
        asin = getattr(self, 'asin', None)
        reviewlist = response.css('div#cm_cr-review_list')
        for review in reviewlist.css('div[data-hook="review"]'):
            rows = review.css('div.a-row')  # each review is split into rows
            amazoncomment = self.cssGetDate(asin, rows)
            self.logger.info("have a url get date: %s " % response.url)
            yield amazoncomment

    def cssGetDate(self, asin, rows):
        """Build an AmazonCommentId item from the per-review row selectors.

        :param asin: the product ASIN the review belongs to
        :param rows: SelectorList of the review's ``div.a-row`` rows
        :return: populated AmazonCommentId item
        """
        amazoncomment = AmazonCommentId()
        amazoncomment['AppName'] = 11
        amazoncomment['asin'] = asin
        amazoncomment['amazoncommentid'] = rows[0].css("a[data-hook=\"review-title\"]::attr(href)").extract()[0]
        amazoncomment['grade'] = rows[0].css("span.a-icon-alt::text").extract()[0]
        amazoncomment['title'] = rows[0].css("a[data-hook=\"review-title\"]::text").extract()[0]
        amazoncomment['author'] = rows[1].css("a[data-hook=\"review-author\"]::text").extract()[0]
        amazoncomment['authorid'] = rows[1].css("a[data-hook=\"review-author\"]::attr(href)").extract()[0]
        amazoncomment['date'] = rows[1].css("span[data-hook=\"review-date\"]::text").extract()[0]
        # "Verified Purchase" badge is optional.
        try:
            amazoncomment['commenttype'] = rows[2].css("span[data-hook=\"avp-badge\"]::text").extract()[0]
        except IndexError:
            amazoncomment['commenttype'] = "null"
        amazoncomment['content'] = rows[3].css("span[data-hook=\"review-body\"]::text").extract()[0]
        # The helpful-vote statement is absent when nobody voted.
        try:
            amazoncomment['helpful'] = rows[4].css("span[data-hook=\"helpful-vote-statement\"]::text").extract()[0]
        except IndexError:
            amazoncomment['helpful'] = '0'
        amazoncomment['participant'] = '0'
        # self.channel is set in start_requests before any response arrives.
        amazoncomment['channel'] = self.channel
        return amazoncomment

