# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import time
import traceback

from proxyips.base.Stringhand import StringHand
from proxyips.mysqlmoudel.myusesql.spidersql import SpiderSql
from proxyips.mysqlmoudel.sqlbuilder import MysqlBuilder
from proxyips.mysqlmoudel.sqlfactory import Abssql

# reload(sys)
# sys.setdefaultencoding('utf-8')


class ProxyIpsPipeline(object):
    """Scrapy item pipeline that persists scraped proxy-IP items to MySQL.

    Scrapy drives the lifecycle in this order:
        from_crawler -> open_spider -> process_item (once per item) -> close_spider
    """

    def __init__(self, crawler, mysql, spidersql):
        # mysql / spidersql are project helpers built in from_crawler();
        # the crawler handle is accepted for interface compatibility.
        self.mysql = mysql
        self.cursor = mysql.cursor
        self.spidersql = spidersql
        self.list = []

    def process_item(self, item, spider):
        """Called once for every item the spider yields.

        Normalizes the item's bookkeeping fields, builds an INSERT/REPLACE
        statement through the project's SpiderSql helper, and commits it to
        the `mws_proxy_ips` table.  On a database error the transaction is
        rolled back; the item is returned either way so later pipelines run.

        :param item: scraped item (dict-like)
        :param spider: the spider that produced the item
        :return: the normalized item
        """
        print("**********process_item***********")
        print(dict(item))
        # Project helper: coerce any unicode values to plain strings.
        item = StringHand.alltostring(item)
        item['created_at'] = item['updated_at'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        item['work'] = item['source'] = '1'  # mark the proxy as usable
        item['errtimes'] = item['usetimes'] = item['lapseTimes'] = '0'
        try:
            # Build the SQL that saves the item to the database.
            sql = self.spidersql.savecommentitem("mws_proxy_ips", item)
            print("*************sql**********************")
            # Print the statement as text: `sql.encode('utf-8')` would print
            # a bytes repr (b'...') on Python 3 instead of the SQL itself.
            print(sql)
            print("*************sql**********************")
            self.cursor.execute(sql)
            self.mysql.db.commit()
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            traceback.print_exc()
            spider.logger.error("sql execute err,please check")
            self.mysql.db.rollback()
        else:
            spider.logger.info("sql execute success")
        return item

    def open_spider(self, spider):
        """Runs second (spider startup): share the DB handles with the spider."""
        print("**********open_spider***********")
        spider.spidersql = self.spidersql
        spider.cursor = self.cursor
        spider.mysql = self.mysql
        self.spider = spider

    def close_spider(self, spider):
        """Runs last (spider shutdown): flush pending work, then release resources."""
        print("**********close_spider***********")
        # Commit BEFORE closing the cursor; the original closed the cursor
        # first, which risks losing the final transaction on some drivers.
        self.mysql.db.commit()
        self.cursor.close()
        self.mysql.close()

    @classmethod
    def from_crawler(cls, crawler):
        """Runs first: Scrapy factory hook that builds the pipeline.

        Connects to MySQL using ./proxyips/config/mysql.ini and returns a new
        pipeline instance.  `crawler` gives global access to settings/signals.
        """
        print("**********from_crawler***********")
        mysql = Abssql.getsqlfractry().mysqlFractry("mysql")
        builder = MysqlBuilder('./proxyips/config/mysql.ini', 'mysqllocalhost')
        moudle = builder.build_all().get_moudle().todict()
        print(moudle)
        mysql.link(moudle)
        spidersql = SpiderSql()
        return cls(crawler, mysql, spidersql)

class effectivePipeline(object):
    """Pipeline that pulls stored proxy IPs back out of MySQL for
    re-validation, locking each fetched row so other workers skip it.

    Scrapy drives the lifecycle in this order:
        from_crawler -> open_spider -> process_item (once per item) -> close_spider
    """

    def __init__(self, crawler, mysql, spidersql):
        # mysql / spidersql are project helpers built in from_crawler();
        # the crawler handle is accepted for interface compatibility.
        self.mysql = mysql
        self.cursor = mysql.cursor
        self.spidersql = spidersql
        self.list = []

    def process_item(self, item, spider):
        """Called once per yielded item; this pipeline only passes it through.

        :param item: scraped item
        :param spider: the spider that produced the item
        :return: the item, unchanged
        """
        print("**********process_item***********")
        return item

    def open_spider(self, spider):
        """Runs second (spider startup): share the DB handles with the spider."""
        print("**********open_spider***********")
        spider.spidersql = self.spidersql
        spider.cursor = self.cursor
        spider.mysql = self.mysql
        self.spider = spider

    def close_spider(self, spider):
        """Runs last (spider shutdown): flush pending work and release resources."""
        print("**********close_spider***********")
        self.cursor.close()
        self.mysql.db.commit()
        self.mysql.close()

    @classmethod
    def from_crawler(cls, crawler):
        """Runs first: Scrapy factory hook that builds the pipeline.

        Connects to MySQL using ./proxyips/config/mysql.ini and returns a new
        pipeline instance.  `crawler` gives global access to settings/signals.
        """
        print("**********from_crawler***********")
        mysql = Abssql.getsqlfractry().mysqlFractry("mysql")
        builder = MysqlBuilder('./proxyips/config/mysql.ini', 'mysqllocalhost')
        moudle = builder.build_all().get_moudle().todict()
        print(moudle)
        mysql.link(moudle)
        spidersql = SpiderSql()
        return cls(crawler, mysql, spidersql)

    def startbaiduandamaon(self, spider):
        """Fetch proxy rows that need validation and yield them one by one.

        Yields ("http://ip:port", row_id) tuples for each fetched row, or a
        single None when the query fails or returns nothing.

        :param spider: unused here; kept for signal-handler compatibility
        """
        print("**********startbaiduandamaon***********")
        # Local result holder.  The original used `global datas2`, which
        # (a) raised NameError on the error path of the very first call and
        # (b) leaked stale rows from a previous call into this one.
        rows = None
        try:
            sql = self.spidersql.validationproxyip()
            self.cursor.execute(sql)
            self.mysql.db.commit()
            rows = self.cursor.fetchall()
            print(rows)
            for row in rows:
                # Lock each fetched proxy so concurrent validators skip it.
                sql = self.spidersql.lockproxyip('lock', row[0])
                self.cursor.execute(sql)
                self.mysql.db.commit()
            print(self.cursor.rowcount)
            print("*********exe sql*****************")
        except Exception:
            # Narrowed from a bare `except:`; report, roll back, and stop —
            # the original fell through and could iterate stale/undefined data.
            self.spider.logger.error("sql execute err,please check")
            self.mysql.db.rollback()
            yield None
            return
        else:
            self.spider.logger.info("sql execute success")
        print(rows)
        if rows is not None:
            for row in rows:
                proxy = "http://%s:%s" % (str(row[1]).strip(), str(row[2]).strip())
                yield (proxy, row[0])
        else:
            print("**********datas2 is null*********")
            yield None

