#-*-coding:utf-8-*-

import time
from pprint import pprint
# from scrapy import log 
#from scrapy.spider import BaseSpider
from scrapy.spiders import Spider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from jianshu_crawler.items import JianshuCrawlerItem
from jianshu_crawler.utils.select_result import list_first_item,strip_null,deduplication,clean_url
import logging

class WoaiduSpider(Spider):
    """Spider for jianshu.com collection pages.

    ``parse`` handles the collection listing pages in ``start_urls`` and
    extracts the per-article links; ``parse_detail`` scrapes one article
    page into a :class:`JianshuCrawlerItem`.
    """

    name = "jianshu"
    start_urls = (
            'http://www.jianshu.com/c/aaf0e06c2815',
            'http://www.jianshu.com/c/ce7bb82a4a15?utm_source=desktop&utm_medium=notes-included-collection',
    )

    def parse(self, response):
        """Default callback for ``start_urls``: collect article links on a
        collection page.

        NOTE(review): crawling of the detail pages / pagination is currently
        disabled -- this callback only logs the response, and the extracted
        hrefs are not yet followed.
        """
        response_selector = HtmlXPathSelector(response)
        # Hrefs of the articles listed on this collection page.
        next_link = (response_selector.select(u'//*[@class="note-list"]/li/div/a/@href').extract())
        # Use logging (consistent with parse_detail) rather than the
        # Python 2-only bare `print` statement.
        logging.debug(response)
        # To actually crawl the detail pages, re-enable something like:
        #   for detail_link in next_link:
        #       if detail_link:
        #           detail_link = clean_url(response.url, detail_link, response.encoding)
        #           yield Request(url=detail_link, callback=self.parse_detail)

    def parse_detail(self, response):
        """Scrape a single article page into a JianshuCrawlerItem.

        Every field is read via a brittle absolute XPath, so any field may
        come back as None when the page layout differs; ``list_first_item``
        returns the first match or None.
        """
        woaidu_item = JianshuCrawlerItem()
        response_selector = HtmlXPathSelector(response)
        woaidu_item['content'] = list_first_item(response_selector.select(u'//*[@id="flag"]/div[2]/div[2]/div/div[4]/text()').extract())
        woaidu_item['author'] = list_first_item(response_selector.select(u'/html/body/div[1]/div[1]/div[1]/div[1]/div/span[2]/a/text()').extract())
        # Guard the strip(): list_first_item may return None when the title
        # node is missing, and None.strip() would raise AttributeError.
        title = list_first_item(response_selector.select(u'/html/body/div[1]/div[1]/div[1]/h1/text()').extract())
        woaidu_item['title'] = title.strip() if title else title

        # Counter stats shown on the article page (word count, reads,
        # comments, likes) and the publication timestamp.
        woaidu_item['words_num'] = list_first_item(response_selector.select(u'//*[@id="flag"]/div[2]/div[2]/div/div[2]/span[1]/text()').extract())
        woaidu_item['read_num'] = list_first_item(response_selector.select(u'//*[@id="flag"]/div[2]/div[2]/div/div[2]/span[2]/text()').extract())
        woaidu_item['comment_num'] = list_first_item(response_selector.select(u'//*[@id="flag"]/div[2]/div[2]/div/div[2]/span[3]/text()').extract())
        woaidu_item['link_num'] = list_first_item(response_selector.select(u'//*[@id="flag"]/div[2]/div[2]/div/div[2]/span[4]/text()').extract())
        woaidu_item['push_time'] = list_first_item(response_selector.select(u'//*[@id="flag"]/div[2]/div[2]/div/div[1]/span[2]/text()').extract())

        logging.debug("-----------woaidu_item--------")
        logging.debug(woaidu_item)
        yield woaidu_item
