# -*- coding: utf-8 -*-
import scrapy
import pymongo
import json
from xiaohongshu.items import XiaohongshuSearch

MAX_PAGE_SEARCH = 4

class MongoClint():
    """Thin wrapper around a local MongoDB connection for search results.

    NOTE(review): the class name is presumably a typo for "MongoClient",
    but it is referenced elsewhere in this file, so it is kept as-is.
    """
    # Name of the collection that stores keyword -> note-id documents.
    collection_item = 'items'

    def __init__(self):
        # Connects eagerly at construction time to the default local server.
        self.client = pymongo.MongoClient('localhost', 27017)
        self.db = self.client.xiaohongshu

    def check_item_in_db(self, item_id):
        """Return True if a document with ``_id == item_id`` already exists."""
        # count_documents() replaces the deprecated Collection.count()
        # (removed in pymongo 4.x); use the collection_item constant
        # instead of the hard-coded ``self.db.items`` attribute access.
        return self.db[self.collection_item].count_documents({"_id": item_id}) > 0

    def save_res_to_db(self, keywords, id_array):
        """Persist one search result: the keyword plus its de-duplicated ids.

        ``id_array`` may contain duplicates; they are collapsed via set().
        """
        dict_item = {
            "keyword": keywords,
            "ids": list(set(id_array)),
        }
        try:
            # insert_one() replaces the deprecated Collection.insert()
            # (removed in pymongo 4.x). pymongo assigns an ObjectId "_id"
            # into dict_item client-side before sending.
            self.db[self.collection_item].insert_one(dict_item)
        except pymongo.errors.DuplicateKeyError:
            # The original passed logger-style %s args to print, which just
            # printed the raw format string; interpolate for real instead.
            print('_id %s has been existed' % dict_item.get("_id"))
            #logger.debug('_id %s has been existed', dict_item["_id"])

class Search3Spider(scrapy.Spider):
    """Search-note spider for m.xiaohongshu.com.

    Usage: scrapy crawl search3 -a keywords="金瓶安耐晒"
    Example endpoints:
    http://m.xiaohongshu.com/web_api/sns/v1/search/note?keyword=%E6%97%A0%E6%AF%94%E6%BB%B4&page=12
    http://m.xiaohongshu.com/web_api/sns/v1/search/note?keyword=cpb&page=5
    """
    name = "search3"
    # allowed_domains must contain bare domain names; the original full URL
    # never matches, so the OffsiteMiddleware would filter every request.
    allowed_domains = ["m.xiaohongshu.com"]
    #start_urls = ['http://http://m.xiaohongshu.com/web_api/sns/v1/search/']
    item_url = 'http://www.xiaohongshu.com/discovery/item/'
    base_url = 'http://m.xiaohongshu.com/web_api/sns/v1/search/note?keyword='
    keyword = None
    # NOTE(review): class-level attribute — opens a MongoDB connection as a
    # side effect of merely importing this module; consider moving into
    # __init__ if import-time connections are undesirable.
    mongodb = MongoClint()

    def get_keywords(self, keywords):
        """Normalize *keywords* into a single query string.

        A list is joined with '+' (the query-string word separator); any
        other value (a plain string) is returned unchanged.
        """
        if isinstance(keywords, list):
            # join replaces the manual +=-and-trim concatenation loop.
            return "+".join(keywords)
        return keywords

    def __init__(self, keywords=None, *args, **kwargs):
        self.client = pymongo.MongoClient('localhost', 27017)
        self.db = self.client.xiaohongshu
        super(Search3Spider, self).__init__(*args, **kwargs)
        if keywords is None:  # 'is None', not '== None'
            self.keyword = "cpb"  # default search term
        else:
            self.keyword = self.get_keywords(keywords)
            print(self.keyword)

    def start_requests(self):
        """Yield one search request per result page, pages 1..MAX_PAGE_SEARCH."""
        # +1 so MAX_PAGE_SEARCH pages are actually requested; the original
        # range(1, MAX_PAGE_SEARCH) stopped one page short of the maximum.
        for page in range(1, MAX_PAGE_SEARCH + 1):
            url = '%s%s&page=%d' % (self.base_url, self.keyword, page)
            yield scrapy.Request(url=url, callback=self.parse_item)

    def parse_item(self, response):
        """Parse one JSON search-result page into a XiaohongshuSearch item.

        Pages with fewer than two notes yield an empty dict instead.
        """
        js = json.loads(response.body_as_unicode())
        notes = js["data"]["notes"]
        if len(notes) < 2:
            # Presumably a near-empty page marks the end of results —
            # TODO confirm why the threshold is 2 rather than 0.
            yield {}
        else:
            item = XiaohongshuSearch()
            item["keyword"] = self.keyword
            # Copy only the fields we care about from each note; the
            # upstream 'id' field is renamed to 'oid' in our schema.
            fields = ("title", "likes", "time", "user", "images_list", "desc")
            data = []
            for note in notes:
                entry = {name: note[name] for name in fields}
                entry["oid"] = note["id"]
                data.append(entry)
            item["data"] = data
            yield item