# -*- coding: utf-8 -*-

import preprocess.stopwords as Stopwords

import Data.UrlsDataFromBaidu as DataBaidu
import numpy as np
import jieba
import word2vec

class BaiduTitle_Embedding:
    """Build word (or character) embeddings from URL titles crawled from Baidu
    and derive per-title similarity ("distance") matrices against a fixed set
    of category keywords.

    Pipeline (driven by __init__):
      1. read Baidu (url, title) pairs grouped by label,
      2. write a segmented corpus file and train a word2vec model on it,
      3. for each category keyword collect its top-100 similar words,
      4. score every train/test title against every keyword.

    Results are stored on the instance as ``Train_Second_Title_distance``
    and ``Test_Second_Title_distance`` (numpy arrays).
    """

    # Space-separated corpus file fed to word2vec.
    File_Embedding = 'Data/Baidu_Embedding.txt'
    # Binary word2vec model trained from File_Embedding.
    File_Model = 'Data/Baidu_Embedding_pharase.bin'

    # Category keywords. 'parse' holds word-level keywords (titles cut with
    # jieba); 'char' holds single-character keywords (titles iterated char by
    # char). NOTE(review): the 'char' list contains repeated entries (e.g.
    # u'买' twice, u'外' and u'物' appearing more than once). They are kept
    # as-is because the list order/length defines the columns of the distance
    # matrix — deduplicate only after checking downstream consumers.
    categories_zh = {'parse': [u'艺术',u'戏剧',
                         u'汽车',
                         u'财经',u'投资',
                         u'美妆',u'化妆',
                         u'商务',u'加盟',
                         u'动漫',u'漫画',
                         u'做饭',u'下厨',
                         u'手工',
                         u'亲子',u'母婴',
                         u'时尚',
                         u'快餐',u'外卖',
                         u'美食',
                         u'游戏',
                         u'健康',u'健身',
                         u'家装',u'装修',
                         u'手机',
                         u'电影',
                         u'音乐',u'听歌',
                         u'新闻',
                         u'户外',
                         u'宠物',
                         u'政治',
                         u'购物',
                         u'摄影',u'摄像',
                         u'社交',u'论坛',
                         u'体育',u'运动',
                         u'视频',
                         u'科技',
                         u'旅游'
                        ],
                'char': [u'艺',u'戏',u'剧',
                         u'车',
                         u'财',u'经',
                         u'妆',
                         u'盟',
                         u'漫',
                         u'厨',
                         u'子',u'母',u'婴',
                         u'时',u'尚',
                         u'快',u'外',
                         u'食',
                         u'游',
                         u'健',
                         u'装',
                         u'手',u'机',
                         u'影',
                         u'音',u'乐',
                         u'新',u'闻',
                         u'户',u'外',
                         u'宠',u'物',
                         u'政',
                         u'购',u'物',u'买',u'买',u'商',u'城',
                         u'摄',
                         u'社',u'坛',u'会',
                         u'体',u'运',u'动',
                         u'视',u'频',
                         u'科',u'技',
                         u'旅'
                        ]}

    def __init__(self, X_split, X_test, CutMethod):
        """Run the full embedding/scoring pipeline.

        Parameters
        ----------
        X_split : iterable of parts, each an iterable of rows where row[1]
            is a title string (training data).
        X_test : iterable of rows where row[1] is a title string (test data).
        CutMethod : 'parse' (jieba segmentation) or 'char' (character level).
        """
        dict_label_titles = self.read_data()
        self.write_data(dict_label_titles, CutMethod)
        # Train a 200-dimensional word2vec model on the freshly written corpus.
        word2vec.word2vec(self.File_Embedding, self.File_Model, size=200, verbose=True)
        dict_categories_simi = self.category_similarity(CutMethod)
        Train_Second_Title = BaiduTitle_Embedding.generate_train_title(X_split)
        Test_Second_Title = BaiduTitle_Embedding.generate_text_title(X_test)

        self.Train_Second_Title_distance = self.generate_distance_matrix(
            dict_categories_simi, Train_Second_Title, CutMethod)
        self.Test_Second_Title_distance = self.generate_distance_matrix(
            dict_categories_simi, Test_Second_Title, CutMethod)

    def read_data(self):
        """Read Baidu (url, title) pairs and group the non-empty titles.

        Returns
        -------
        dict
            label -> list of title strings (labels with only empty titles
            still get an (empty) entry, as in the original behavior).
        """
        data = DataBaidu.UrlDataFromBaidu().convert_to_two_level()
        dict_label_titles = {}
        for urlandtitle, label in zip(data.X, data.Y):
            # setdefault preserves the original behavior of creating the
            # label entry even when this particular title is empty.
            titles = dict_label_titles.setdefault(label, [])
            if len(urlandtitle[1]) > 0:
                titles.append(urlandtitle[1])
        return dict_label_titles

    def write_data(self, dict_label_titles, CutMethod):
        """Cut every title and write the surviving tokens to File_Embedding
        as a whitespace-separated word2vec training corpus.

        Raises
        ------
        ValueError
            If CutMethod is neither 'parse' nor 'char' (previously this
            surfaced as an UnboundLocalError on ``cut``).
        """
        # `with` guarantees the file is closed even if cutting raises.
        with open(self.File_Embedding, 'w', encoding="utf-8") as file_w:
            for key in dict_label_titles:
                for title in dict_label_titles[key]:
                    title = title.strip()
                    if CutMethod == 'parse':
                        cut = jieba.cut_for_search(title)
                    elif CutMethod == 'char':
                        cut = title  # iterating a str yields its characters
                    else:
                        raise ValueError("unknown CutMethod: %r" % (CutMethod,))
                    for token in cut:
                        try:
                            # Skip stop words; keep everything else.
                            if Stopwords.justify_stop_words(token.strip()) == False:
                                file_w.write(token)
                                file_w.write(' ')
                        except UnicodeError as e:
                            # BUG FIX: Python 3 exceptions have no `.message`,
                            # so the original `print(e.message)` itself raised.
                            # UnicodeError also covers encode errors — the kind
                            # that can actually occur while writing.
                            print(e)
                    file_w.write('\n')
                file_w.write('\n')

    def category_similarity(self, CutMethod):
        """For each category keyword, collect its 100 most similar vocabulary
        words from the trained model.

        Returns
        -------
        dict
            keyword -> {word: cosine similarity}.
        """
        model = word2vec.load(self.File_Model)
        dict_categories_simi = {}
        for c in self.categories_zh[CutMethod]:
            indexes, metrics = model.cosine(c, 100)  # top 100 similar words
            word_similarity = model.generate_response(indexes, metrics).tolist()
            # Each entry is (word, similarity, ...); keep word -> similarity.
            dict_categories_simi[c] = {x[0]: x[1] for x in word_similarity}
        return dict_categories_simi

    @staticmethod
    def generate_train_title(X_split):
        """Flatten the split training set into a list of stripped titles
        (row[1] of every row, in order)."""
        return [row[1].strip() for part in X_split for row in part]

    @staticmethod
    def generate_text_title(X_test):
        """Collect stripped titles (row[1]) from the test rows.

        NOTE(review): the name is presumably a typo for ``generate_test_title``;
        kept unchanged for caller compatibility.
        """
        return [row[1].strip() for row in X_test]

    def generate_distance_matrix(self, dict_categories_simi, Second_Title, CutMethod):
        """Score every title against every category keyword.

        For each (title, keyword) cell, keep the highest similarity of any
        of the title's tokens to that keyword (0.0 when no token is similar).

        Returns
        -------
        numpy.ndarray of shape (len(Second_Title), len(categories_zh[CutMethod])).

        Raises
        ------
        ValueError
            If CutMethod is neither 'parse' nor 'char'.
        """
        categories = self.categories_zh[CutMethod]
        Second_Title_distance = np.zeros((len(Second_Title), len(categories)))
        for i, title in enumerate(Second_Title):
            if CutMethod == 'parse':
                cut = jieba.cut_for_search(title)
            elif CutMethod == 'char':
                cut = title
            else:
                raise ValueError("unknown CutMethod: %r" % (CutMethod,))
            for word in cut:
                for j, c in enumerate(categories):
                    simi = dict_categories_simi[c].get(word, 0.0)
                    if simi > Second_Title_distance[i][j]:  # keep the maximum
                        Second_Title_distance[i][j] = simi
        return Second_Title_distance

if __name__ == "__main__":
    import os
    # Run relative to the project root so the relative 'Data/...' paths
    # (File_Embedding, File_Model) resolve.
    os.chdir("..")

    # FIXME: BaiduTitle_Embedding.__init__ requires three positional
    # arguments (X_split, X_test, CutMethod); calling it with no arguments
    # raises TypeError. Supply split train/test data and a cut method
    # ('parse' or 'char') here.
    d = BaiduTitle_Embedding()
