# -*- coding: utf-8 -*-
import re
import urllib

import scrapy
from scrapy.http import Request

from scrapyHexun.items import ScrapyhexunItem


class HexunSpider(scrapy.Spider):
    """Spider that crawls every article-listing page of one Hexun blog.

    Starting from the blog's front page, it extracts each article's
    title, URL, hit count and comment count, then discovers the total
    number of listing pages (once) and schedules the remaining pages.
    """

    name = 'hexun'
    allowed_domains = ['hexun.com']

    # Blog owner ID — the subdomain of blog.hexun.com to crawl.
    uid = "rubbervalley"
    # Total number of listing pages; -1 means "not discovered yet".
    totalPages = -1

    # Page links look like "blog.hexun.com/p<NN>/". Compiled once;
    # r-string and escaped dots avoid accidental metacharacter matches,
    # and \d+ (not \d*?) refuses an empty page number.
    _PAGE_LINK_RE = re.compile(r'blog\.hexun\.com/p(\d+)/')

    def start_requests(self):
        # Initial fetch: page 1 of the blog (the default listing page).
        yield Request("http://" + self.uid + ".blog.hexun.com/")

    def parse(self, response):
        """Extract article metadata from one listing page.

        Yields one ScrapyhexunItem whose fields are parallel lists
        (name/url/hits/comment, one entry per article on the page).
        On the first response only, it also determines the total page
        count and yields Requests for pages 2..N.
        """
        item = ScrapyhexunItem()
        item["name"] = response.xpath("//span[@class='ArticleTitleText']/a/text()").extract()
        item["url"] = response.xpath("//span[@class='ArticleTitleText']/a/@href").extract()
        item["hits"] = response.xpath("//div[@class='ArticleInfo']/span[1]/text()").extract()
        item["comment"] = response.xpath("//div[@class='ArticleInfo']/a[2]/span/text()").extract()
        yield item

        # Pagination discovery AND scheduling happen exactly once, on the
        # first page. (Previously the scheduling loop ran on every
        # callback, re-yielding requests for all pages each time.)
        if self.totalPages == -1:
            # response.text is the decoded HTML; str(response.body) would
            # be the bytes repr ("b'...'") under Python 3.
            pages = self._PAGE_LINK_RE.findall(response.text)
            if len(pages) >= 2:
                # The last pager link is "next page"; the one before it
                # carries the highest page number.
                self.totalPages = int(pages[-2])
            else:
                # Single-page blog — nothing more to schedule.
                self.totalPages = 1
            self.logger.info("total pages: %s", self.totalPages)

            for page in range(2, self.totalPages + 1):
                url = "http://" + self.uid + ".blog.hexun.com/p" + str(page) + "/default.html"
                yield Request(url, callback=self.parse)