# -*- coding: utf-8 -*-
import scrapy
import json, pickle, redis, re
from douban.settings import redis_instance
from copy import deepcopy
import time
import logging
from xpinyin import Pinyin

logger = logging.getLogger(__name__)
pinyin = Pinyin()

# Resolve which Douban tag to crawl: prefer the value stored in Redis,
# fall back to the default '儿童文学' (children's literature).
tag_name = redis_instance.get('tag_name')
if tag_name:
    # FIX: redis-py returns bytes; str(bytes) would produce the literal
    # "b'...'" repr and corrupt the pinyin slug. Decode explicitly instead
    # (same idiom the spider class uses for latest_crawled_page_url).
    tag_name = str(tag_name, encoding='utf-8')
else:
    tag_name = '儿童文学'
# Pinyin slug of the tag, used to namespace the Redis bookkeeping keys.
tag_name_pinyin = pinyin.get_pinyin(tag_name, '_')

class BookSpider(scrapy.Spider):
    """Crawl Douban book listing pages for the configured tag.

    ``parse`` walks one listing page (20 books), emits a ``parse_book``
    request per book not yet recorded in the Redis set ``book_url_set``,
    and advances to the next page once Redis says all 20 entries of the
    current page were handled. Progress is tracked in Redis:

    - ``latest_page_<tag>``  : URL of the listing page last visited
    - ``page_hash``          : hash of ``page:<tag>:<offset>`` -> books done
    - ``book_url_set``       : set of already-crawled book detail URLs
    - ``cnt_book_added``     : throttle counter (sleep after ~300 books)
    """

    name = 'book'
    allowed_domains = ['book.douban.com']

    # Resume from the last listing page recorded in Redis, if any.
    latest_crawled_page_url = redis_instance.get('latest_page_%s' % tag_name_pinyin)
    if latest_crawled_page_url:
        latest_crawled_page_url = str(latest_crawled_page_url, encoding='utf-8')
    # FIX: build the entry URL from tag_name instead of hard-coding '儿童文学',
    # so the spider honours whatever tag is configured in Redis (consistent
    # with the pagination URL built in parse_book).
    target_url = 'https://book.douban.com/tag/%s?start=0&type=T' % tag_name
    start_urls = [latest_crawled_page_url or target_url]

    @staticmethod
    def _clean(text):
        """Strip newlines, ideographic spaces (U+3000) and ASCII spaces."""
        return re.sub(r' ', '', re.sub(r'\u3000', '', re.sub(r'\n', '', text)))

    def parse(self, response):
        """Parse one tag listing page and schedule book-detail requests.

        Yields ``scrapy.Request`` objects: one per new book (callback
        ``parse_book``) and possibly one for the next listing page.
        """
        if response.status != 200:
            logger.info('===>Crawled page failed at url[%s], status:%d' % (response.url, response.status))
            return
        # Remember where we are so a restart resumes from this page.
        redis_instance.set('latest_page_%s' % tag_name_pinyin, response.url)
        m = re.search(r'^http.*?start=(\d+)&type=T', response.url)
        current_page = 'page:%s:%s' % (tag_name_pinyin, m.group(1))
        for li in response.xpath("//*[@id='subject_list']/ul/li"):
            item = {'current_page': current_page}
            item['pic_url'] = li.xpath("./div[@class='pic']/a/img/@src").extract_first()
            info_div = li.xpath("./div[@class='info']")
            book_url = info_div.xpath("./h2/a/@href").extract_first()
            if book_url:
                # e.g. https://book.douban.com/subject/1077778/ -> sku 1077778
                book_url_arr = book_url.split('/')
                if len(book_url_arr) > 3:
                    item['sku'] = book_url_arr[-2]
            name = info_div.xpath("./h2/a/text()").extract_first()
            item['name'] = self._clean(name)
            # The pub line is "author / [translator /] publisher / date / price".
            info = info_div.xpath("./div[@class='pub']/text()").extract_first()
            if info:
                info_arr = self._clean(info).split('/')
                if len(info_arr) == 5:
                    (item['author'], item['translator'], item['pub'],
                     item['pub_date'], item['price']) = info_arr
                elif len(info_arr) == 4:
                    (item['author'], item['pub'],
                     item['pub_date'], item['price']) = info_arr
            star_div = info_div.xpath("./div[contains(@class,'star')]")
            item['douban_star'] = star_div.xpath("./span[1]/@class").extract_first()
            item['douban_rank'] = star_div.xpath("./span[@class='rating_nums']/text()").extract_first()
            douban_cnt_voted = star_div.xpath("./span[@class='pl']/text()").extract_first()
            # FIX: guard against a missing vote-count span (None would make
            # re.sub raise TypeError and abort the whole page).
            if douban_cnt_voted is not None:
                douban_cnt_voted = re.sub(r'人评价', '', self._clean(douban_cnt_voted))
                douban_cnt_voted = douban_cnt_voted.replace('(', '').replace(')', '')
            item['douban_cnt_voted'] = douban_cnt_voted
            intro = info_div.xpath("./p/text()").extract_first()
            # FIX: replaced bare except with the explicit None check it was
            # papering over.
            if intro is not None:
                item['intro'] = re.sub(r'\u3000', '', re.sub(r'\n', '', intro))
            else:
                item['intro'] = intro
            if redis_instance.sismember("book_url_set", book_url):
                # Already crawled: just bump the per-page completion counter.
                logger.info('===>Book url[%s] already crawled before.' % book_url)
                redis_instance.hincrby('page_hash', item['current_page'], 1)
            else:
                logger.info('===>Ready to crawling book[%s]' % book_url)
                item['book_url'] = book_url
                target = book_url if book_url.startswith('http') else response.urljoin(book_url)
                # deepcopy so parse_book gets an independent item per request.
                yield scrapy.Request(target, callback=self.parse_book,
                                     dont_filter=False, meta={"item": deepcopy(item)})
            cnt = int(redis_instance.hget('page_hash', item['current_page']) or 0)
            if cnt >= 20:
                # All 20 entries of this page accounted for: move on.
                logger.info('===>Successfully crawled page [%s]' % item['current_page'])
                next_page = int(item['current_page'].split(':')[2]) + 20
                # FIX: use the configured tag instead of hard-coded '儿童文学'
                # (same URL shape parse_book already builds).
                yield scrapy.Request('https://book.douban.com/tag/%s?start=%d&type=T' % (tag_name, next_page),
                                     callback=self.parse, dont_filter=False)

    def parse_book(self, response):
        """Parse one book detail page and yield the completed item.

        Adds ``isbn`` and the HTML ``content`` intro to the item carried in
        ``response.meta``, updates the Redis progress counters, throttles
        after ~300 books, and paginates the listing when the page is done.
        """
        if response.status != 200:
            logger.info('===>Crawled book failed for url[%s], status:%d' % (response.url, response.status))
            return
        item = deepcopy(response.meta["item"])
        content_div = response.xpath("//div[@id='wrapper']").xpath("./div[@id='content']")
        article_div = content_div.xpath("//*/div[@class='article']")
        info_div = content_div.xpath("//*/div[@id='info']")
        # The ISBN is the text node following the last <span> label in #info.
        isbn = info_div.xpath("./span[last()]").xpath("./following-sibling::text()").extract_first()
        item['isbn'] = self._clean(isbn)
        intro_div_str = ''.join(article_div.xpath("//*/div[@class='intro']/node()").extract())
        # When Douban collapses a long intro, keep only the full text that
        # follows the "(展开全部)" expand marker.
        expand_marker = '</p><p><a href="javascript:void(0)" class="j a_show_full">(展开全部)</a></p>'
        if expand_marker in intro_div_str:
            intro_div_str = intro_div_str.split(expand_marker)[-1]
        item['content'] = re.sub(r'\n', '', intro_div_str)
        cnt = redis_instance.hincrby('page_hash', item['current_page'], 1)
        yield item
        logger.info('===>Crawled book successfully for url [%s], [%s], [%s]' % (response.url, item['current_page'], cnt))
        # Politeness throttle: after ~300 books, sleep 1.5h to avoid bans.
        cnt_book_added = redis_instance.incr("cnt_book_added")
        if cnt_book_added and cnt_book_added > 300:
            redis_instance.delete("cnt_book_added")
            logger.info('<<<sleeping 1.5 hour...')
            time.sleep(5400)
            logger.info('...wake on>>>')
        if cnt >= 20:
            logger.info('===>Successfully crawled page [%s]' % item['current_page'])
            next_page = int(item['current_page'].split(':')[2]) + 20
            yield scrapy.Request('https://book.douban.com/tag/%s?start=%d&type=T' % (tag_name, next_page),
                                 callback=self.parse, dont_filter=False)

