#-*-coding:utf-8-*-

import time
from pprint import pprint
# from scrapy import log 
#from scrapy.spider import BaseSpider
from scrapy.spiders import Spider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from jianshu_crawler.items import JianshuCrawlerItem
from jianshu_crawler.utils.select_result import list_first_item,strip_null,deduplication,clean_url
import logging
import json
from scrapy.utils.response import open_in_browser

class WoaiduSearchDetailSpider(Spider):
    """Crawl Jianshu's JSON search API and scrape each result's detail page.

    Flow:
      * ``parse`` reads one JSON page of search results, schedules the next
        results page (when more pages remain) and one Request per article.
      * ``parse_detail`` receives the article's HTML page and yields a
        ``JianshuCrawlerItem`` (field extraction is still TODO).
    """

    name = "jianshu_search_detail"

    # Per-spider headers: the search endpoint returns JSON and requires a
    # logged-in session cookie, so they are pinned here rather than in the
    # project-wide settings.
    # NOTE(review): the hard-coded session cookie will expire — consider
    # moving it to settings or performing a real login.
    custom_settings = {
       "DEFAULT_REQUEST_HEADERS":{
             'host':'www.jianshu.com',
             'accept':'application/json',
             'accept-encoding':'gzip, deflate, sdch',
             'accept-language':'zh-CN,zh;q=0.8',
             'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36',
             'cookie':'remember_user_token=W1s2MjA1Njg2XSwiJDJhJDEwJFVqSGM4VDBHTjB3d2JtaGYvOEpsVy4iLCIxNDk1NzgzNDMzLjkzMTk0NzIiXQ%3D%3D--b19ecd2347a97999faa28240a9329798d151d973; _ga=GA1.2.807242886.1495783177; _gid=GA1.2.448947094.1495866370; Hm_lvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1495783177,1495853844; Hm_lpvt_0c0e9d9b1e7d617b3e6842e85b9fb068=1495866370; _session_id=K0IrRURBZlRRcUdBRW9lS2xMV1l5N0RJZS95WHQybnRsMU9RU2JzZU92Zk5qV2JmVEtkYXcwbnVrbTBMRGRNa2dBNVZFZVdDekl6ZUpUS2ZpU2ZzY3lUZjdPV1N1M3JVb1FmbnVZdmRkSHhxTjRrenV5M25kWFFYR2pkSEFIN0lNZTMxOWZzSDJmcEpMTE9DNHpraktmWmY1MmVrbDZQSXJYUDZCSy80bG1EZW5yVU1QL0VVOG1IaHRiUy9kOXhET0M5b2Yva1QxK2w3aitHZnl1UnlMVVFMS3JjOGp3QnNXdDNLU3FnVjRPNEgyVjN4RmJOUVlvUzY2S3dDZ2MvNVVpME1kN00rdEVhdkFVSlZsQTJjNklTSHg4cXpEaGJqOHpNTEl4dDBFS2lOeVdxKysvQTNaVUl0Ty9ubVRXUWZGRmJjWjZaZlFucmxJRkQ1UzlHeCtLMlVuajd4YXcrZXF5SFIyakxadW5uT3RWUUZ1UTdpYWRIQnljNmIwbmRhbjNFV3MrSFZhSnkwM3RyR1J0NEVTRytlSWlQQXFSMlBEQTNleS9tU1hNRT0tLWdQKzU4UFdFNjdDR1BiM1dQaFBtRXc9PQ%3D%3D--ceff406358661fdd859929066f9b53e26635bd80'
         },
     }
    start_urls = (
            'http://www.jianshu.com/search/do?q=php&type=note&page=1&order_by=default',
    )

    def parse(self, response):
        """Parse one JSON page of search results.

        Yields a Request for the next results page (pagination) plus one
        Request per article detail page.
        """
        json_body = json.loads(response.body)

        # Pagination: the API reports the current and total page counts.
        # Bug fix: next_link used to be computed but never requested, so
        # the spider silently stopped after the first results page.
        if json_body['page'] < json_body['total_pages']:
            next_page = json_body['page'] + 1
            next_link = ''.join(['http://www.jianshu.com/search/do?q=php&type=note&page=', str(next_page), '&order_by=default'])
            yield Request(url=next_link, callback=self.parse)

        # Detail pages are plain HTML, so request them with HTML accept
        # headers instead of the JSON headers in custom_settings.
        detail_headers = {
             'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
             'Accept-Encoding' : 'gzip, deflate, sdch',
             'Accept-Language' : 'zh-CN,zh;q=0.8',
             'Connection' : 'keep-alive',
             'Host' : 'www.jianshu.com'
        }
        # Bug fix: a leftover debug `break` ("测试"/testing) limited the
        # spider to the first entry of each page; every slug is now queued.
        for entry in json_body['entries']:
            if entry['slug']:
                art_link = ''.join(['http://www.jianshu.com/p/', entry['slug']])
                logging.debug('scheduling detail page: %s', art_link)
                yield Request(url=art_link, callback=self.parse_detail, method='GET', headers=detail_headers)

    def parse_detail(self, response):
        """Build a JianshuCrawlerItem from an article detail page.

        TODO: field extraction (title, author, content, word/read/comment
        counts, push_time, ...) is not implemented yet — the item is
        currently yielded empty with no fields populated.
        """
        logging.debug('parse_detail got response: %s', response)
        woaidu_item = JianshuCrawlerItem()
        yield woaidu_item
