# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from jianshu.items import JianshuItem
from jianshu.items import JianshuArticleItem
import time
import re
import itertools
import sys
# Python 2 hack: reload() restores sys.setdefaultencoding (deleted by
# site.py at startup) so implicit str<->unicode conversions use UTF-8
# instead of ASCII, avoiding UnicodeDecodeError on Chinese page content.
# NOTE(review): this is Python-2-only and breaks on Python 3.
reload(sys)   
sys.setdefaultencoding('utf8') 

class JianshuspiderSpider(CrawlSpider):
    """Crawl www.jianshu.com, extracting user profiles and article stats.

    User-profile links (``/users/``, ``/u/``) are routed to ``parse_item``
    and article links (``/p/``) to ``parse_item2``; both rules also follow
    outgoing links so the crawl keeps expanding.
    """
    name = 'jianshuspider'
    allowed_domains = ['www.jianshu.com']
    start_urls = ['http://www.jianshu.com/']

    rules = (
        # User-profile extraction rules.
        Rule(LinkExtractor(allow=r'/users/'), callback='parse_item', follow=True),
        Rule(LinkExtractor(allow=r'/u/'), callback='parse_item', follow=True),
        # Article extraction rule.
        Rule(LinkExtractor(allow=r'/p/'), callback='parse_item2', follow=True),
    )

    # URL substrings that mark profile *sub*-pages (followers, likes,
    # timeline, ...) rather than the profile page itself.
    USER_URL_EXCLUDES = ('follow', 'subscriptions', 'liked', 'order_by',
                         'shared', 'timeline')

    # URL substrings that mark non-article pages also matched by the
    # /p/ link rule.
    ARTICLE_URL_EXCLUDES = ('index-banner', 'comments', '/users/', '/u/')

    # Patterns for counters embedded in the article page's inline JSON.
    # Compiled once here (the original rebuilt them on every response)
    # and using capture groups instead of magic slice offsets.
    RE_VIEWS = re.compile(r'"views_count":([0-9]{1,10}),')
    RE_COMMENTS = re.compile(r'"comments_count":([0-9]{1,10}),')
    RE_LIKES = re.compile(r'"likes_count":([0-9]{1,10}),')
    RE_REWARDS = re.compile(r'"total_rewards_count":([0-9]{1,10}),')
    RE_WEIBO_IMAGE = re.compile(
        r'<a class="share-circle" data-toggle="tooltip" href="(.*?\.jpg)"')

    def parse_item(self, response):
        """Extract a user-profile JianshuItem from a profile page.

        Returns the populated item, or None for filtered sub-pages and for
        pages whose layout does not match (the original returned a
        half-empty item in that case, polluting the pipeline).
        """
        users_url = response.url
        # Guard clause replaces the original six-level nested `if` pyramid.
        if any(word in users_url for word in self.USER_URL_EXCLUDES):
            return None

        title = response.xpath('//div[@class="title"]/a[@class="name"]/text()').extract()
        # Following count, follower count, article count.
        info = response.xpath('//div[@class="info"]/ul/li/div/a/p/text()').extract()
        # Total word count, likes received.
        info2 = response.xpath('//div[@class="info"]/ul/li/div/p/text()').extract()
        intro = response.xpath('//div[@class="js-intro"]/text()').extract()
        intro = "-*-".join(intro).replace(';', '')

        # Only emit an item when the page matched the expected layout.
        if not (len(info) == 3 and len(info2) == 2 and len(title) == 1):
            return None

        print("Name:%s******URL:%s" % (title[0], users_url))
        item = JianshuItem()
        item['name'] = title[0]           # display name
        item['users_url'] = users_url     # profile page URL
        item['attention'] = info[0]       # following count
        item['fans'] = info[1]            # follower count
        item['article'] = info[2]         # article count
        item['words_num'] = info2[0]      # total words written
        item['gain_like'] = info2[1]      # likes received
        item['intro'] = intro             # profile signature
        return item

    def parse_item2(self, response):
        """Extract an article JianshuArticleItem from an article page.

        Returns the populated item, or None for URLs that are not real
        article pages (banners, comment feeds, profile links).
        """
        article_url = response.url
        if any(word in article_url for word in self.ARTICLE_URL_EXCLUDES):
            return None

        html = response.body

        def embedded_count(pattern):
            # Counters live only in the page's embedded JSON. Fall back to
            # '' when a pattern misses; the original sliced findall()[0]
            # and raised IndexError on any page lacking the counter.
            match = pattern.search(html)
            return match.group(1) if match else ''

        item = JianshuArticleItem()
        # Article title (from the <title> tag) and page URL.
        item['article_title'] = response.xpath('/html/head/title/text()').extract()
        item['article_url'] = [article_url]
        # Author name.
        item['article_autho'] = response.xpath('/html/body/div[1]/div[1]/div[1]/div[1]/div/span[2]/a/text()').extract()
        # Last-edited timestamp.
        item['article_edit_time'] = response.xpath('/html/body/div[1]/div[1]/div[1]/div[1]/div/div/span[1]/text()').extract()
        # Word count.
        item['article_num'] = response.xpath('/html/body/div[1]/div[1]/div[1]/div[1]/div/div/span[2]/text()').extract()
        # View and comment counts (single-element lists, as before).
        item['article_read'] = [embedded_count(self.RE_VIEWS)]
        item['article_comment'] = [embedded_count(self.RE_COMMENTS)]
        # NOTE(review): likes/rewards were stored as plain strings (not
        # lists) in the original; kept that shape for pipeline compatibility.
        item['article_like'] = embedded_count(self.RE_LIKES)
        item['article_admire'] = embedded_count(self.RE_REWARDS)
        # Author signature line.
        item['article_autho_intro'] = response.xpath('/html/body/div[1]/div[1]/div[2]/div[2]/text()').extract()
        # Body text paragraphs joined with newlines.
        article_txt = response.xpath('/html/body/div[1]/div[1]/div[1]/div[2]/p/text()').extract()
        item['article_txt'] = ["\n".join(article_txt).replace(',\n', ',')]
        # Weibo share image URL from the share-circle anchor.
        weibo_match = self.RE_WEIBO_IMAGE.search(html)
        item['weibo_image'] = [weibo_match.group(1) if weibo_match else '']
        return item











"""
        html = response.body
        #文章标题
        item['article_title'] = article[0]
        #作者
        item['article_autho']
        #最后编辑时间
        item['article_edit_time']
        #字数
        item['article_num']
        #阅读数
        item['article_read']    
        #评论数
        item['article_comment']
        #喜欢数
        item['article_like']
        #赞赏数
        item['article_admire']
        #作者信息：写了 **** 字，被 **** 人关注，获得了 **** 个喜欢
        item['article_autho_info']
        #作者签名信息
        item['article_autho_intro']
        #正文——文本——信息
        item['article_txt']
        #文章页面链接
        item['article_url']
        return item



"""