
# -*- coding: utf-8 -*-
"""
Created on Thu May 11 22:28:56 2017

@author: claude
"""
import re
import os
import sys
import pymongo
from pymongo import MongoClient
import jieba
import jieba.analyse
#import operator

reload(sys) 
sys.setdefaultencoding( "utf-8" ) 

DESCRP_PATH = "./descriptions"

def RemoveEmoji(s):
    """
    Strip bracketed emoji tags such as "[smile]" from the given string.

    Matching is greedy: everything from the first '[' to the last ']' on
    the string is removed, mirroring the original pattern.
    """
    emoji_pattern = re.compile(r'\[.*\]')
    return emoji_pattern.sub('', s)
    
def output_format(dict_item):
    """
    Render a post document as a markdown snippet.

    Returns ``[post_id, text]`` where ``text`` contains the author, id,
    product name, like/collect scores, the description, and one markdown
    image line per picture URL.
    """
    text = str(dict_item['author']) + "\t" + str(dict_item['_id']) + "\n : " + str(dict_item['product']) + " 喜欢: " + str(dict_item["like_score"]) + " 收藏: " + str(dict_item["collect_score"]) + "\n" + str(dict_item['description']) + "\n"
    # BUG FIX: the original tested len(...) > 1, which silently dropped the
    # image when a post had exactly one picture; emit images for any
    # non-empty picture list.
    if dict_item['picture_url']:
        for pic_url in dict_item['picture_url']:
            text += "![](" + pic_url + ")\n"
    return [dict_item["_id"], text]
    
def output_text(dict_item):
    """Return the post's description field coerced to a string."""
    description = dict_item['description']
    return str(description)

def evalue_scores(post):
    """
    Compute a popularity score for a post.

    Score = number of pictures + comments_score + 5 * collect_score
            + 2 * like_score.

    BUG FIX: the original computed this expression but never returned it,
    so every caller received None and ranking by score was meaningless.
    """
    return (len(post["picture_url"])
            + post["comments_score"]
            + post["collect_score"] * 5
            + post["like_score"] * 2)
    
class Analysis():
    """
    Keyword search over the xiaohongshu MongoDB database.

    Results for a keyword are cached in the ``searched`` collection; on a
    cache miss a scrapy spider is launched to crawl fresh posts, which are
    then ranked by evalue_scores() and cached for later runs.
    """
    # Name of the MongoDB collection that caches earlier search results.
    collection_item = 'searched'
    # Selects the spider used on a cache miss: 1 -> "search", 2 -> "search2".
    flag = 1

    def __init__(self, keyword):
        self.client = MongoClient('localhost', 27017)
        self.db = self.client.xiaohongshu
        self.keyword = keyword
        self.dict_item = {}
        self.dict_item["keyword"] = self.keyword
        self.result = []
        self.exist_in_searched = False

    def set_flag(self, flag):
        """Select which spider (1 or 2) get_items() runs on a cache miss."""
        self.flag = flag

    def get_items(self):
        """
        Fill ``self.result`` with full post documents for ``self.keyword``.

        Serves the cached id list from ``db.searched`` when present
        (setting ``self.exist_in_searched`` to True); otherwise runs the
        selected scrapy spider, ranks the crawled posts and caches the
        ranking.
        """
        # BUG FIX: this method previously read the module-global ``keyword``
        # instead of ``self.keyword``, so the class only worked when driven
        # by the __main__ script that happened to set that global.
        if self.db[self.collection_item].find({"keyword": self.keyword}).count() != 0:
            post = self.db[self.collection_item].find_one({"keyword": self.keyword})
            self.dict_item["ids"] = post["ids"]
            self.dict_item["total_num"] = post["total_num"]
            self.exist_in_searched = True
        else:
            self.exist_in_searched = False
            # Crawl fresh results with the configured spider.
            # NOTE(review): os.system interpolates user input into a shell
            # string; prefer subprocess.call(["scrapy", "crawl", ...]) to
            # avoid shell injection.
            if self.flag == 1:
                os.system("scrapy crawl search -a keywords=\"" + self.keyword + "\" ")
            elif self.flag == 2:
                os.system("scrapy crawl search2 -a keywords=\"" + self.keyword + "\" ")
            self.cursor = self.query_items()
            self.dict_item["total_num"] = self.cursor.clone().count()
            self.dict_item["ids"] = self.get_top_notes()
            try:
                self.db[self.collection_item].insert(self.dict_item)
            except pymongo.errors.DuplicateKeyError:
                # A concurrent run cached the same keyword first; the cache
                # entry exists, so this is safe to ignore.
                pass
        for item_id in self.dict_item["ids"]:
            self.result.append(self.db.items.find_one({"_id": item_id}))

    def query_items(self):
        """Return a cursor over items whose product or title contains the keyword."""
        pattern = re.compile(".*" + self.keyword + ".*")
        return self.db.items.find({"$or": [{"product": pattern}, {"title": pattern}]})

    def get_results_num(self):
        """Return the total number of items matched for this keyword."""
        return self.dict_item["total_num"]

    def get_top_notes(self):
        """Return item ids from ``self.cursor`` sorted by score, best first."""
        scores = []
        for post in self.cursor.clone():
            scores.append(evalue_scores(post))
        # Indices of the posts ordered by descending popularity score.
        sorted_idx = sorted(range(len(scores)), key=lambda k: scores[k], reverse=True)
        id_array = []
        posts = self.cursor.clone()
        for idx in sorted_idx:
            id_array.append(posts[idx]["_id"])
        return id_array

    def generate_md(self, num):
        """
        Return markdown snippets for the leading results.

        NOTE(review): the ``idx > num`` guard keeps up to num+1 posts
        (indices 0..num); preserved as-is for backward compatibility.
        """
        text_array = []
        for idx, post in enumerate(self.result):
            if idx > num:
                break
            [set_id, text] = output_format(post)
            text_array.append(text)
        return text_array

    def extract_tags(self, num):
        """Extract the top ``num`` jieba keywords from the result descriptions."""
        all_text = []
        for idx, post in enumerate(self.result):
            if idx > num:
                break
            all_text.append(RemoveEmoji(output_text(post)))
        jieba.load_userdict("userdict.txt")
        text_str = "".join(all_text)
        tags = jieba.analyse.extract_tags(text_str, num)
        return tags

if __name__=="__main__":
    """
    support only 1 keyword
    """
    if len(sys.argv)==1:
        keyword = "rafa"
    elif len(sys.argv)==2:
        keyword = sys.argv[1]
    elif len(sys.argv)==3:
        print int(sys.argv[1]),
        keyword = sys.argv[2]
    print "keyword: ",keyword
    if os.path.exists(DESCRP_PATH+"/"+keyword+'.txt'):
        print "%s already existed!" %keyword
        exit
    else:
        analysis = Analysis(keyword)
        if len(sys.argv)==3:        
            analysis.set_flag(int(sys.argv[1]))
            analysis.get_items()
        else:
            analysis.set_flag(1)
            analysis.get_items()
            if analysis.exist_in_searched==False:
                analysis.set_flag(2)
                analysis.get_items()            
        if not os.path.exists(DESCRP_PATH):
            os.makedirs(DESCRP_PATH)
        if os.path.isfile(DESCRP_PATH+"/"+keyword+'.txt'):
            exit
        with open(DESCRP_PATH+"/"+keyword+'.txt', 'w') as f:
            f.write(repr(analysis.get_results_num()))
            tags = "\n"
            for tag in analysis.extract_tags(20):
                tags += tag+" "
            f.write(tags+'\n')
            for text in analysis.generate_md(20):
                f.write(text+'\n')
