# -*- encoding:utf8 -*-
from scrapy.contrib.spiders import CrawlSpider
import pymysql
from scrapy.selector import Selector
from jd_spider.items import JdSpiderItem

def get_detail_url():
    """Fetch every row of the jd_bra_url table.

    Returns the DictCursor result set (a sequence of dicts, one per row,
    each expected to carry a 'url' column), or an empty list if the
    query fails for any reason. The connection is always closed.
    """
    config = {
        'host': "127.0.0.1",
        'port': 3306,
        'user': 'root',
        'password': '',
        'db': 'test',
        'charset': 'utf8',
        'cursorclass': pymysql.cursors.DictCursor,
    }
    connection = pymysql.connect(**config)
    try:
        with connection.cursor() as cursor:
            sql = 'SELECT * from jd_bra_url'
            print(sql)
            cursor.execute(sql)
            # Fetch while the cursor is still open: DB-API 2.0 does not
            # guarantee fetchall() on a closed cursor (the old code only
            # worked because pymysql's default cursor buffers rows).
            # A plain SELECT also needs no commit().
            return cursor.fetchall()
    except Exception as e:
        # Best-effort: log instead of silently swallowing, then fall back
        # to an empty url list so the spider class can still be defined.
        print("get_detail_url failed: %s" % e)
        return []
    finally:
        connection.close()

def record_failed(url):
    """Insert *url* into the jd_bra_detail_failed table.

    Used to remember detail pages that could not be processed. Errors
    propagate to the caller; the connection is always closed.
    """
    config = {
        'host': "127.0.0.1",
        'port': 3306,
        'user': 'root',
        'password': '',
        'db': 'test',
        'charset': 'utf8',
        'cursorclass': pymysql.cursors.DictCursor,
    }
    connection = pymysql.connect(**config)
    try:
        with connection.cursor() as cursor:
            # MySQL INSERT syntax has no TABLE keyword; the original
            # 'INSERT INTO TABLE ...' was a syntax error and could never run.
            sql = 'INSERT INTO jd_bra_detail_failed (failed_url) VALUES (%s)'
            print(sql)
            # DB-API parameters must be a sequence: (url) is just url,
            # (url,) is the one-element tuple.
            cursor.execute(sql, (url,))
        # Autocommit is off by default, so commit explicitly to persist.
        connection.commit()
    finally:
        connection.close()

def save_to_mysql(itemid, detail):
    """Persist one scraped item: insert (itemid, detail) into jd_bra_info.

    Commits explicitly (autocommit is off by default) and always closes
    the connection; database errors propagate to the caller.
    """
    db_settings = {
        'host': "127.0.0.1",
        'port': 3306,
        'user': 'root',
        'password': '',
        'db': 'test',
        'charset': 'utf8',
        'cursorclass': pymysql.cursors.DictCursor,
    }
    conn = pymysql.connect(**db_settings)
    try:
        insert_sql = 'INSERT INTO jd_bra_info (itemid,detail) VALUES (%s,%s)'
        with conn.cursor() as cur:
            cur.execute(insert_sql, (itemid, detail))
        # Persist the insert; without an explicit commit it is lost.
        conn.commit()
    finally:
        conn.close()


class jd_item_detail(CrawlSpider):
    """Spider over JD item detail pages.

    Seed urls are loaded from the jd_bra_url MySQL table at
    class-definition time (each row's 'url' column lacks a scheme, so
    'http://' is prepended). Each page's parameter list is joined and
    stored into jd_bra_info keyed by the numeric item id from the url.
    """
    name = "jd_item_detail"

    # NOTE(review): hits the database when the module is imported.
    # start_urls_tmp is kept as a class attribute for compatibility.
    start_urls_tmp = get_detail_url()
    start_urls = ["http://" + str(row["url"]) for row in start_urls_tmp]

    def parse(self, response):
        sel = Selector(response)
        # The <li> texts of the spec/parameter list on the detail page.
        item_detail_list = sel.xpath(
            ".//*[@id='detail']/div[2]/div[1]/div[1]/ul[2]/li/text()"
        ).extract()

        # NOTE(review): price is extracted but not stored anywhere yet;
        # extract() returns [] for missing nodes, so no try/except needed.
        price = sel.xpath('.//*[@class="p-price"]/span[2]/text()').extract()

        info = "------".join(item_detail_list)

        # e.g. "http://item.jd.com/10033845704.html"
        #   split(".")[2]  -> "com/10033845704"
        #   split('/')[1]  -> "10033845704"
        itemid = response.url.split(".")[2].split('/')[1]
        print(itemid)
        print(response.url)

        save_to_mysql(int(itemid), info)

        item = JdSpiderItem()
        return item







