# -*- coding: utf-8 -*-
import scrapy,MySQLdb,time
from scrapy import Spider, Item, Field, log ,Request
from framwork.models import StoreBook,StoreChapter,Novel
import framwork.db as db
from multiprocessing.dummy import Pool as ThreadPool #多线程

DUMP_DATA = False  # debug switch: when True, output() echoes scraped text to stdout

def output(text):
    """Print *text* only when the DUMP_DATA debug flag is enabled.

    Uses the single-argument print() call form, which behaves identically
    under Python 2's print statement and Python 3's print function.
    """
    if DUMP_DATA:
        print(text)

def toPageInt(page_text):
    """Parse the total page count out of a pagination label.

    The site renders pagination as e.g. ``"1-11 | ..."``; the number after
    the ``-`` (and before the ``|``) is the last page number.

    :param page_text: raw pagination string scraped from ``span.record``
                      (renamed from ``str``, which shadowed the builtin)
    :return: total number of pages as an int
    :raises IndexError, ValueError: if the text does not match the
            ``"<first>-<last> | ..."`` shape
    """
    # strip() (no argument) also removes tabs/newlines, a safe superset
    # of the original strip(' ').
    return int(page_text.split('|')[0].split('-')[1].strip())

# Module-level side effect: initialize the DB engine for the 'bookstore'
# MySQL database so the (currently commented-out) Novel inserts can run.
# NOTE(review): root user with an empty password — presumably dev-only; confirm.
db.create_engine(user='root', password='', database='bookstore')

class ReadScrapy(scrapy.Spider):
    name = 'readspider'
    # start_urls = ['http://www.juxread.com/book/list.aspx']
    start_urls = ['http://www.qyuread.com/book/List.aspx?rd=81040&classid=0&updatestatus=0&feetype=0&sort=0&WordSize=0&pagesize=20&maxpage=11&PageIndex=1']
    # url = 'http://www.qyuread.com/book/List.aspx?rd=81040&classid=0&updatestatus=0&feetype=0&sort=0&WordSize=0&pagesize=20&maxpage=11&PageIndex=1'

    def parse(self,response):
        pageStr = response.css("span.record::text").extract()[0].encode('utf8')
        page = toPageInt(pageStr)
        print page
        index = 0
        start = time.time()
        # pool = ThreadPool(processes=4)
        for i in range(page):
            for bookList in response.css("table.stacks-table tr"):
                # url = bookList.css("a::attr(href)").extract()[0].encode('utf8')
                # url = bookList.css("a::text").extract()[0].encode('utf8')
                # print bookList
                # name = bookList.css("tr td.f14 c-666")
                # name = bookList.css("tr td::text")
                name = bookList.css("tr td a::text")
                author = bookList.css("tr td[style^='padding-left']::text")
                if len(name):
                    name = name.extract()[0].encode('utf8')
                    name = name.split('  ')[0]
                    index = index+1
                else:
                    name = ''
                if len(author):
                    author = author.extract()[0].encode('utf8')
                else:
                    author = ''
                # print bookList
                print "书名:" + name
                print '======='
                print  "作者:" + author
                # try:
                #     u = Novel(novel_name=name,author=author)
                #     u.insert()
                # except Exception as e:
                #     raise
            # yield Request(url='http://www.juxread.com/book/List.aspx?rd=81102&classid=0&updatestatus=0&feetype=0&sort=0&WordSize=0&pagesize=20&maxpage=52&PageIndex=' + str(i) , meta={}, callback=self.parse)
            yield Request(url='http://www.qyuread.com/book/List.aspx?rd=81040&classid=0&updatestatus=0&feetype=0&sort=0&WordSize=0&pagesize=20&maxpage=11&PageIndex=' + str(i) , meta={}, callback=self.parse)
            print "总数据量：" + str(index)
            # bookName = bookList.css('td.f14 c-666::text').extract()[0].encode('utf8')
            # output(bookName)

            # 使用多线程提高效率
        #     pool.close()
        # pool.join()
        print '执行爬虫时间:', time.time() - start


    # def parse(self,response):
    #     for bookList in response.css("div.rank-header clearfix ul ul"):
    #         name = bookList.css("li div.li-img clearfix div.li-img-right h4 a::text")
    #         url = bookList.css("li div.li-img clearfix div.li-img-right h4 a::attr(href)")
    #         author = bookList.css("li div.li-img clearfix div.li-img-right p::attr(href)")
    #         print bookList
    #         print name
    #         print url
    #         print author
