# -*- coding: utf-8 -*-
import scrapy
from scrapy import Spider, Item, Field, log ,Request
import MySQLdb,json
from framwork.models import StoreBook,StoreChapter
import framwork.db as db

# Global switch: when True, debug helpers dump crawl progress to stdout.
DUMP_DATA = False

def output(text):
    """Print *text* only when debug dumping (DUMP_DATA) is enabled.

    FIX: use function-call form ``print(text)`` — with a single argument
    this behaves identically under Python 2 and is valid Python 3,
    whereas the original ``print text`` statement is Python-2-only.
    """
    if DUMP_DATA:
        print(text)

def toPageInt(text):
    """Extract a page number from a string like ``"name - 12 | extra"``.

    Takes the segment before the first ``'|'``, then the part after the
    first ``'-'``, strips surrounding spaces and converts it to ``int``.

    Raises ``IndexError`` if no ``'-'`` is present, ``ValueError`` if the
    extracted segment is not an integer.

    FIX: the parameter was named ``str``, shadowing the builtin type;
    renamed to ``text``.
    """
    page = int(text.split('|')[0].split('-')[1].strip(' '))
    return page

# Module-level side effect: initialise the framwork DB engine used by the
# StoreBook/StoreChapter inserts below.
# NOTE(review): root user with empty password -- development credentials;
# move to configuration before deploying.
db.create_engine(user='root', password='', database='bookstore')
# Legacy direct MySQLdb connection, superseded by db.create_engine above:
# DB=MySQLdb.connect(host="localhost",user="root",passwd="",db="bookstore",charset="utf8")

class UploadScrapy(scrapy.Spider):
    """Crawl an upload directory listing of novels and persist every book,
    chapter directory and chapter .txt file URL into the store database.

    Listing pages are plain directory indexes rendered as ``<pre><a>...``
    links; each level (book -> chapter dir -> .txt file) is one callback.
    """
    name = 'uploadSpider'
    start_urls = ['http://ht.juxread.com/Upload/Novel/Chapter/']
    # Alternate host observed for the same listing:
    # http://www.qyuread.com/Upload/Novel/Chapter/

    def parse(self, response):
        """Top-level listing: register each book, follow its chapter dir.

        Yields one Request per book link, carrying the book directory URL
        in ``meta['bookUrl']`` for :meth:`parseChapter`.
        """
        index = 0  # running count of books seen (debug only)
        for book_link in response.css("pre a"):
            bookName = book_link.css("a[href*='Chapter']::text")
            bookHref = book_link.css("a::attr(href)")
            if len(bookName):
                index = index + 1
                bookName = bookName.extract()[0].encode('utf8')
                bookHref = bookHref.extract()[0].encode('utf8')

                # Let DB errors propagate: the original wrapped this in
                # ``try/except Exception: raise``, a no-op.
                u = StoreBook(bookname=bookName, author='ceshi')
                u.insert()

                # BUG FIX: the request was previously yielded OUTSIDE this
                # guard, so non-matching links produced a garbage URL from
                # str() of an empty SelectorList.
                # NOTE(review): URL is built from the link *text*, not
                # bookHref -- assumes the two are equal; confirm upstream.
                bookUrl = 'http://ht.juxread.com/Upload/Novel/Chapter/' + str(bookName)
                yield Request(url=bookUrl, meta={'bookUrl': str(bookUrl)}, callback=self.parseChapter)

    def parseChapter(self, response):
        """Book directory listing: follow each chapter sub-directory.

        Links are matched by a ``%`` in the href (URL-encoded names).
        Yields a Request per chapter with ``meta['articleUrl']`` for
        :meth:`parseArticle`.
        """
        bookUrl = response.meta['bookUrl']
        for chapter_link in response.css("pre a"):
            url = chapter_link.css("a[href*='%']::text")
            if len(url):
                url = url.extract()[0].encode('utf8')
                articleUrl = bookUrl + '/' + str(url)
                yield Request(url=articleUrl, meta={'articleUrl': str(articleUrl)}, callback=self.parseArticle)

    def parseArticle(self, response):
        """Chapter directory listing: persist every .txt file link found."""
        articleUrl = response.meta['articleUrl']
        for article_link in response.css("pre a"):
            textName = article_link.css("a[href*='.txt']::text")
            if len(textName):
                # Hoisted: original called extract()[0].encode('utf8')
                # twice on the same selector.
                chapterName = textName.extract()[0].encode('utf8')
                articleTxtUrl = articleUrl + '/' + str(chapterName)
                # Let DB errors propagate (original except-raise was a no-op).
                u = StoreChapter(chapter_name=chapterName, article_url=articleTxtUrl)
                u.insert()
