# -*- coding: utf-8 -*-
from qiniu import Auth, put_file, etag, urlsafe_base64_encode
import qiniu.config
import urllib
import urllib2
import requests
import scrapy
import json
import time
import MySQLdb
import datetime

# from jiandan.items import JiandanItems
class Tu525Spider(scrapy.Spider):
    """Crawl interior-design galleries on tu.525j.com.cn.

    For every detail page found on a list page, the spider asks Qiniu's
    /fetch API to mirror the page's first gallery image into the
    'raspberry-zero' bucket, then records the resulting storage key in a
    local MySQL table (``smart_home.smart_home``).  When every link on a
    list page has been visited, it follows the pagination control to the
    next page and repeats.
    """

    name = "tu5250"
    allowed_domains = ["tu.525j.com.cn"]
    start_urls = ['http://tu.525j.com.cn/jplist/0_-1_0_5_0_0_-1_0_0_1.html']
    custom_settings = {
        'DUPEFILTER_DEBUG': 'true',
    }

    # Crawl-cursor state (class-level; acceptable for a one-spider process).
    index = 0                        # position within last_urls on the current list page
    last_urls = []                   # detail-page hrefs scraped from the current list page
    last_url = ''                    # retained for backward compatibility (unused)
    last_page_url = start_urls[0]    # list page to return to after each detail page
    page_index = 2                   # label of the next pagination link to follow

    def fetchResource(self, other_url):
        """Mirror ``other_url`` into Qiniu and persist the storage key.

        Issues a signed POST to Qiniu's fetch endpoint
        (``/fetch/<EncodedURL>/to/<EncodedEntryURI>``), then inserts the
        returned ``key`` plus a timestamp into MySQL.

        NOTE(review): the Qiniu keypair and MySQL credentials are
        hard-coded; move them to Scrapy settings or environment variables
        before sharing or deploying this code.
        """
        access_key = 'DC5XOomwrxFJg-8hht7QpC-l7RXK95KcwBXA7XXD'
        secret_key = 'i0XgI-BJo7Wdr6XZZXGWy7SIIazLQOGYWJtwQNGj'

        host = 'http://iovip.qbox.me'
        content_type = 'application/x-www-form-urlencoded'
        bucket = 'raspberry-zero'

        # Qiniu fetch API path: both the source URL and the target
        # entry (bucket) must be URL-safe base64 encoded.
        body = '/fetch/%s/to/%s' % (urlsafe_base64_encode(other_url),
                                    urlsafe_base64_encode(bucket))
        url = host + body

        auth = Auth(access_key, secret_key)
        token = auth.token_of_request(url=url, content_type=content_type)
        headers = {
            'Host': 'iovip.qbox.me',
            'Content-Type': content_type,
            'Authorization': 'QBox ' + token,
        }
        print(url)
        print(body)
        print(headers['Authorization'])

        r = requests.post(url=url, headers=headers)
        result = json.loads(r.text)
        print(result["key"])

        db = MySQLdb.connect(host="localhost", user="alisql",
                             passwd="alisql@277285590", charset="utf8")
        try:
            cursor = db.cursor()
            cursor.execute("use `%s`;" % "smart_home")
            create_time = time.strftime("%Y-%m-%d %H:%M:%S")
            # Parameterized query: the original interpolated the values
            # straight into the SQL text without quotes, which is both a
            # guaranteed syntax error and an injection risk.
            sql = "insert into smart_home (url,create_time) VALUES (%s,%s)"
            try:
                cursor.execute(sql, (result["key"], create_time))
                db.commit()
            except MySQLdb.Error:
                # Best-effort insert: roll back this row on any DB error
                # and keep crawling (original behavior, narrowed except).
                db.rollback()
        finally:
            db.close()

    def parseDetailPage(self, response):
        """Mirror the detail page's first gallery image, then return to the
        list page (dont_filter=True, since we revisit the same URL) to
        process the next link."""
        img_des_urls = response.xpath('//img[@class="hasbigPic"]//@src').extract()
        # Guard: some detail pages may have no matching image; the original
        # raised IndexError here.
        if img_des_urls:
            self.fetchResource(img_des_urls[0])
        self.index += 1
        yield scrapy.Request(self.last_page_url, callback=self.parse,
                             dont_filter=True)

    def parse(self, response):
        """Walk the current list page one detail link at a time; advance to
        the next pagination page once every link is exhausted."""
        print(self.index)
        if not self.last_urls:
            # First visit to this list page: collect the detail-page links.
            imgtu_url = response.xpath('//a[contains(@href, "share")]//@href').extract()
            self.last_urls = list(set(imgtu_url))
            print(self.last_urls)
        if self.index >= len(self.last_urls):
            # All links on this page visited: find the pagination anchor
            # whose text equals the next page number.
            pager = response.xpath('//div[@class="f_y_Con"]')
            xp = './a[contains(.//text(), "%s")]//@href' % self.page_index
            href = pager.xpath(xp).extract_first()
            print("page %s" % href)
            if href is None:
                # No further pagination link: end of the gallery; the
                # original crashed on ``None`` concatenation here.
                return
            page_url = "http://tu.525j.com.cn" + href
            print("page_url %s" % page_url)
            # Reset the cursor for the new list page.
            self.index = 0
            self.page_index += 1
            self.last_urls = []
            self.last_url = ''
            self.last_page_url = page_url
            yield scrapy.Request(page_url, callback=self.parse,
                                 dont_filter=True)
        else:
            imgtu_url = self.last_urls[self.index]
            if imgtu_url:
                if imgtu_url != "javascript:;":
                    new_imgtu_url = "http://tu.525j.com.cn" + imgtu_url
                    print("new_url %s" % new_imgtu_url)
                    yield scrapy.Request(new_imgtu_url,
                                         callback=self.parseDetailPage)
                else:
                    # Dummy anchor: skip it and re-enter parse on the same
                    # list page.
                    self.index += 1
                    yield scrapy.Request(self.last_page_url,
                                         callback=self.parse)
