# -*- coding: utf-8 -*-
"""
Created on Mon May  1 20:58:58 2017

@author: claude
"""


from scrapy.spiders import CrawlSpider, Rule, Request
import requests
from scrapy.linkextractors import LinkExtractor
from xiaohongshu.items import XiaohongshuItem, XiaohongshuGood, XiaohongshuAlbum


# Crawl-depth switch used as `follow=` in every Rule below:
# False -> parse only the start page(s) (depth = 1);
# True  -> keep following extracted links and crawl all reachable pages (depth >= 1).
CONTINUE_FLAG = True

        
class QuotesSpider(CrawlSpider):
    """Crawl xiaohongshu.com discovery notes, their related notes and album boards.

    Usage:
        scrapy crawl quotes -a start_url="http://www.xiaohongshu.com/discovery/item/545a04c1d6e4a94fa452999d"
    or, with the default start url:
        scrapy crawl quotes
    """
    name = 'quotes'
    allowed_domains = ['xiaohongshu.com']
    start_urls = [
        'http://www.xiaohongshu.com/discovery/item/545a04c1d6e4a94fa452999d',
    ]

    rules = (
        # Note ("item") pages; their related_*/comments sub-pages are excluded
        # here because the dedicated rules below handle them.
        Rule(LinkExtractor(allow=(r'/discovery/item',),
                           deny=(r'/related_discoveries', r'/related_goods', r'/comments')),
             callback='parse_item', follow=CONTINUE_FLAG),
        Rule(LinkExtractor(allow=(r'/discovery/item.*/related_discoveries',)),
             callback='parse_related_discoveries', follow=CONTINUE_FLAG),
        Rule(LinkExtractor(allow=(r'/selected_board',)),
             callback='parse_album', follow=CONTINUE_FLAG),
    )

    # JSON endpoint that pages through the notes related to one discovery id.
    RELATED_API = ('http://www.xiaohongshu.com/api/snsweb/v1/'
                   'get_discovery_related_discoveries?page=%d&tag_id=&discovery_id=%s')

    def __init__(self, start_url=None, *args, **kwargs):
        """Optionally override the default start url via `-a start_url=...`."""
        super(QuotesSpider, self).__init__(*args, **kwargs)
        if start_url is not None:
            self.start_urls = [start_url]

    # ==================== related discoveries ====================

    def _fetch_related_ids(self, discovery_id):
        """Page through the related-discoveries JSON API, returning note ids.

        Stops at the first page carrying fewer than 2 entries (treated as the
        end of the listing), or after 199 pages at most.
        NOTE(review): a final page with exactly 1 entry is discarded by this
        stop condition (kept from the original code) — confirm it is intended.
        """
        id_array = []
        for page in range(1, 200):
            related_items_url = self.RELATED_API % (page, discovery_id)
            self.logger.info('get_related_discoveries, %s', related_items_url)
            # Blocking HTTP call that bypasses the Scrapy scheduler; kept
            # because the API is JSON, not a crawlable page.
            js = requests.get(related_items_url).json()
            if len(js["data"]) < 2:
                break
            for entry in js["data"]:
                id_array.append(entry["id"])
        return id_array

    def parse_related_discoveries(self, response):
        """Callback for /discovery/item/<id>/related_discoveries pages:
        schedule a parse_item request for every related note."""
        # URL ends with .../item/<id>/related_discoveries, so the id is the
        # second-to-last path component.
        discovery_id = response.url.split("/")[-2]
        base_url = "http://www.xiaohongshu.com/discovery/item/"
        for _id in self._fetch_related_ids(discovery_id):
            url = base_url + "%s" % _id  # url construction
            self.logger.debug('Hi, %s', url)
            yield Request(url, callback=self.parse_item)

    def get_related_discoveries(self, response):
        """Return the ids of notes related to the note at response.url
        (sub-step of parse_item)."""
        return self._fetch_related_ids(response.url.split("/")[-1])

    # ==================== note ("item") pages ====================

    def parse_item(self, response):
        """Callback for note pages (/discovery/item/<id>): build one
        XiaohongshuItem from the page plus the related-notes API."""
        self.logger.info('Hi, this is an ITEM page! %s', response.url)
        item = XiaohongshuItem()
        self.get_item_id(response, item)
        self.get_info(response, item)
        self.get_item_product(response, item)
        self.get_item_description(response, item)
        self.get_item_recommendation(response, item)
        self.get_item_related_note(response, item)
        self.get_item_album(response, item)
        yield item

    def get_item_id(self, response, item):
        # The note id is the last path component of the url.
        item['_id'] = response.url.split("/")[-1]

    def get_info(self, response, item):
        """Fill author, author_id, title, collect/like/comments scores and
        picture urls from the first <section> of the note page."""
        section_1 = response.xpath('/html/body/section[1]')
        item['author'] = section_1.xpath('//div[2]/div[2]/a/text()').extract_first()
        item['author_id'] = section_1.xpath('//div[2]/div[2]').css('a::attr(href)').extract_first()
        item['title'] = section_1.xpath('//div[3]/h2/text()').extract_first()
        # Scores are rendered as "<label>\xb7<count>" spans; the count follows
        # the middle-dot (U+00B7) separator.
        s = section_1.xpath('//div[4]').css('span::text').extract()
        if len(s) >= 2:
            item['collect_score'] = int(s[0].split(u'\xb7')[-1]) if len(s[0]) > 2 else 0
            item['like_score'] = int(s[1].split(u'\xb7')[-1]) if len(s[1]) > 1 else 0
        else:
            # -1 marks "score block not found on the page".
            item['like_score'] = -1
            item['collect_score'] = -1
        s = section_1.xpath('//div[5]/a/em/text()').extract_first()
        # BUG FIX: was `int()` (always 0); convert the scraped comment count.
        item['comments_score'] = 0 if s is None else int(s)
        item['picture_url'] = section_1.css("img.image::attr(data-src)").extract()

    def get_item_product(self, response, item):
        item['product'] = response.xpath(
            '/html/body/section[2]/*[@id="note_item"]/div/div[2]/h1/text()').extract_first()

    def get_item_description(self, response, item):
        # Concatenate every text fragment of the note body into one string.
        item['description'] = "".join(
            response.css("div.note-desc").css("p.content::text").extract())

    def get_item_recommendation(self, response, item):
        item['recommendation'] = response.xpath(
            '/html/body/section[3]/ul').css('a::attr(href)').extract()

    def get_item_related_note(self, response, item):
        # On-page related links plus the (usually larger) list from the JSON API.
        item['related_note'] = response.xpath(
            '/html/body/section[4]/*[@id="note_list"]').css('a::attr(href)').extract()
        item['related_note'] += self.get_related_discoveries(response)

    def get_item_album(self, response, item):
        item['album'] = response.xpath(
            '/html/body/section[5]').css('a.board::attr(href)').extract()

    # ==================== goods pages (rule currently disabled) ====================

    def parse_good(self, response):
        # Placeholder callback; the /goods Rule is commented out above.
        self.logger.info('Hi, this is a GOOD page! %s', response.url)

    # ==================== album (selected_board) pages ====================

    def parse_album(self, response):
        """Callback for /selected_board/<id> pages: build one XiaohongshuAlbum."""
        self.logger.info('Hi, this is a SELECTED_BOARD page! %s', response.url)
        item = XiaohongshuAlbum()
        self.get_album_id(response, item)
        self.get_album_title(response, item)
        self.get_album_items(response, item)
        yield item

    def get_album_id(self, response, item):
        # The board id is the last path component of the url.
        item['_id'] = response.url.split("/")[-1]

    def get_album_title(self, response, item):
        item['title'] = response.xpath(
            'string(/html/body/section[1]/h1/text())').extract_first()

    def get_album_items(self, response, item):
        # Keep only links of the form .../item/<id>; store just the ids.
        album_items = []
        for href in response.xpath(
                '/html/body/section[2]/*[@id="note_list"]').css('a::attr(href)').extract():
            if href.split("/")[-2] == 'item':
                album_items.append(href.split("/")[-1])
        item['items'] = album_items
                    
