#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json

import re

import time

from tutorial.base.Stringhand import StringHand
from tutorial.mysqlmoudel.myusesql.spidersql import SpiderSql
from tutorial.mysqlmoudel.sqlbuilder import MysqlBuilder
from tutorial.mysqlmoudel.sqlfactory import Abssql


class TutorialPipeline(object):
    """Scrapy item pipeline that normalizes Amazon review items and
    persists them to MySQL.

    Lifecycle (Scrapy drives these calls):
      1. ``from_crawler`` — builds the pipeline with a live MySQL link.
      2. ``open_spider``  — shares the DB handles with the spider.
      3. ``process_item`` — runs once per scraped item.
      4. ``close_spider`` — commits and releases the connection.
    """

    # NOTE(review): unused inside this class; kept because external code
    # may read it as a class attribute.
    vat_factor = 1.15

    def __init__(self, mysql, spidersql):
        self.mysql = mysql
        self.cursor = mysql.cursor
        self.spidersql = spidersql
        self.list = []

    def process_item(self, item, spider):
        """Normalize one scraped item and insert it into the database.

        Executed once for every item the spider yields.

        :param item: scraped review item (dict-like)
        :param spider: the spider that produced the item
        :return: the normalized item, passed on to later pipelines
        """
        print("**********process_item***********")
        # Convert any unicode values in the item to plain strings.
        item = StringHand.alltostring(item)
        # Extract the useful fragments from the raw scraped strings.
        item = self.effectiveitem(item)
        item['fetchtime'] = int(time.time())
        item['status'] = 1  # 1 = live comment, 0 = deleted comment
        try:
            sql = self.spidersql.savecommentitem(item)  # build the INSERT statement
            self.cursor.execute(sql)
        except Exception:
            # Was a bare ``except:`` — never swallow SystemExit/KeyboardInterrupt.
            spider.logger.error("sql execute err,please check")
            self.mysql.db.rollback()
        else:
            # Commit per item: previously the commit happened only in
            # close_spider, so a single failed INSERT rolled back every
            # uncommitted row saved before it.
            self.mysql.db.commit()
            spider.logger.info("sql execute success")
        return item

    def open_spider(self, spider):
        """Executed second, when the spider starts.

        Shares the DB handles with the spider so it can run its own queries.

        :param spider: the spider being opened
        """
        print("**********open_spider***********")
        spider.spidersql = self.spidersql
        spider.cursor = self.cursor
        spider.mysql = self.mysql

    def close_spider(self, spider):
        """Executed last, when the spider finishes.

        Flushes any pending transaction and releases the DB resources.

        :param spider: the spider being closed
        """
        print("**********close_spider***********")
        self.cursor.close()
        self.mysql.db.commit()
        self.mysql.close()

    @classmethod
    def from_crawler(cls, crawler):
        """Executed first: class-method factory Scrapy uses to build the pipeline.

        ``crawler`` gives global access to settings and signals.

        :param crawler: the Scrapy crawler
        :return: a new, DB-connected pipeline instance
        """
        print("**********from_crawler***********")
        mysql = Abssql.getsqlfractry().mysqlFractry("mysql")
        builder = MysqlBuilder('./tutorial/config/mysql.ini', 'mysqllocalhost')
        moudle = builder.build_all().get_moudle().todict()
        print(moudle)
        mysql.link(moudle)
        spidersql = SpiderSql()
        return cls(mysql, spidersql)

    def effectiveitem(self, item):
        """Extract the useful values from the raw scraped strings via regex.

        Mutates and returns ``item``: review id, helpful-vote count, star
        grade, author id and date are reduced to their bare values.

        :param item: raw scraped item (dict-like)
        :return: the same item with normalized fields
        """
        # Review id may come in either of two URL shapes.
        matchobj = re.search(r'/gp/customer-reviews/(.*?)/', item['amazoncommentid'])
        if matchobj is not None:
            item['amazoncommentid'] = matchobj.group(1)
        matchobj = re.search(r'/gp/customer-reviews/(.*?)\?ASIN/', item['amazoncommentid'])
        if matchobj is not None:
            item['amazoncommentid'] = matchobj.group(1)

        if item['helpful'] != '0':
            matchobj = re.search(r'\s*(.*?) people found', item['helpful'])
            if matchobj is not None:
                # Amazon spells a single vote as the word "One".
                if matchobj.group(1) == 'One':
                    item['helpful'] = '1'
                else:
                    item['helpful'] = matchobj.group(1)
            else:
                item['helpful'] = '0'

        matchobj = re.match(r'^(.*?) out', item['grade'])
        if matchobj is not None:
            item['grade'] = matchobj.group(1)
        matchobj = re.search(r'/gp/profile/(.*?)/', item['authorid'])
        if matchobj is not None:
            item['authorid'] = matchobj.group(1)
        matchobj = re.search(r'on (.*)', item['date'])
        if matchobj is not None:
            item['date'] = matchobj.group(1)
        return item



class ChangeCustIdPipeline(object):
    """Debug pipeline that appends every item to ``test.json``, one JSON
    object per line (JSON-lines format)."""

    def __init__(self):
        pass

    def process_item(self, item, spider):
        """Write the item to the dump file and pass it downstream.

        Executed once for every item the spider yields.

        :param item: scraped item (dict-like)
        :param spider: the spider that produced the item
        :return: the unchanged item, for later pipelines
        """
        # dict() makes Scrapy Item objects JSON-serializable — the bare
        # item would raise TypeError in json.dumps. A trailing newline
        # keeps the file parseable as JSON lines instead of concatenated
        # objects with no separator.
        self.file.write(json.dumps(dict(item)) + "\n")
        return item

    def open_spider(self, spider):
        """Executed when the spider starts: open the dump file for appending.

        :param spider: the spider being opened
        """
        self.file = open("test.json", "a+")

    def close_spider(self, spider):
        """Executed when the spider finishes: release the file handle.

        :param spider: the spider being closed
        """
        self.file.close()

    @classmethod
    def from_crawler(cls, crawler):
        """Executed first: class-method factory Scrapy uses to build the pipeline.

        :param crawler: the Scrapy crawler (unused here)
        :return: a new pipeline instance
        """
        return cls()
