# -*- coding: utf-8 -*-
import preprocess.url_parser as Parser
import preprocess.gram as Gram
import preprocess.stopwords as Stopwords
import numpy as np
import jieba


class Data2Feature:
    """Convert raw (website, title) records into numeric feature matrices.

    Each input record is a pair ``(website, title)``.  Website URLs are split
    into name tokens (e.g. ``sports.sohu.com`` -> ``sports``, ``sohu``) and
    expanded into letter n-grams; titles are cut either into characters or
    into jieba-parsed words.  Features seen fewer than a minimum number of
    times in the training split are folded into a trailing ``'Others'`` bucket.

    The resulting matrices are exposed as ``self.features``, a dict keyed by
    split name (``'train'``, ``'test'`` and optionally ``'dev'``).
    """

    def __init__(self, X, n1, n2, website=True, title=False, cutmethod='char',
                 add=True, stopwords=True, min_web=2, min_title=2):
        """Build feature matrices from ``X`` and store them on ``self.features``.

        Args:
            X: dict of splits ('train', 'test' and optionally 'dev'); each
               split is a sequence of (website, title) records.
            n1: smallest n used for website letter n-grams.
            n2: largest n used for website letter n-grams (inclusive).
            website: if True, extract features from the website (URL) field.
            title: if True, extract features from the title field.
            cutmethod: 'char' to cut titles into characters, 'parse' to
               segment them with jieba.
            add: if True, cells hold occurrence counts; otherwise 0/1 flags.
            stopwords: if True, drop title segments judged to be stopwords.
            min_web: minimum training-set count for a website feature to get
               its own column.
            min_title: minimum training-set count for a title feature to get
               its own column.
        """
        self.features = Data2Feature.data_to_feature(
            X, n1, n2, website, title, cutmethod, add, stopwords, min_web, min_title)

    @staticmethod
    def _cut_title(title, cutmethod):
        """Split ``title`` into segments according to ``cutmethod``.

        Returns an iterable of segments: jieba search-mode words for 'parse',
        or the stripped string itself (i.e. its characters) for 'char'.

        Raises:
            ValueError: if ``cutmethod`` is neither 'char' nor 'parse'.
              (The original code printed a message and then crashed on an
              unbound variable; raising makes the failure explicit.)
        """
        if cutmethod == 'parse':
            return jieba.cut_for_search(title.strip())
        if cutmethod == 'char':
            return title.strip()
        raise ValueError('Cutmethod should be one of \'char\' and \'parse\'.')

    @staticmethod
    def train_websites_to_feature_dict(train_websites, n1, n2, min_web):
        """Build the website feature vocabulary from the training split.

        Args:
            train_websites: iterable of website URLs from the training set.
            n1, n2: inclusive range of n-gram sizes applied to each URL token.
            min_web: minimum total count for an n-gram to receive a column.

        Returns:
            (dict_token_features, dict_feature_index):
              dict_token_features maps each URL token to its (deduplicated)
              list of n-gram features; dict_feature_index maps each retained
              feature to its column index, with a trailing 'Others' column
              added only when min_web > 1 (when min_web == 1 every training
              feature is retained, so no fallback column is needed).

        Raises:
            ValueError: if ``min_web`` < 1.

        NOTE: counts are accumulated only the first time a token is seen, so
        they reflect unique-token occurrences, not per-website occurrences
        (this preserves the original behavior).
        """
        dict_token_features = {}  # token -> list of its n-gram features
        dict_feature_count = {}   # feature -> total count over training set

        for website in train_websites:
            # URL -> name tokens, e.g. sports.sohu.com => sports, sohu
            for token in Parser.UrlParser(website).names:
                if token in dict_token_features:
                    continue  # each token's grams are expanded only once
                grams = []
                for n in range(n1, n2 + 1):
                    # token -> n-grams, e.g. 3-grams of sports: spo por ort rts
                    for feature in Gram.letter_n_gram(token, n):
                        if feature not in grams:
                            grams.append(feature)
                        dict_feature_count[feature] = dict_feature_count.get(feature, 0) + 1
                dict_token_features[token] = grams

        if min_web < 1:
            raise ValueError('min_web should be at least one.')

        dict_feature_index = {}
        for feature, count in dict_feature_count.items():
            if count >= min_web:  # keep only sufficiently frequent features
                dict_feature_index[feature] = len(dict_feature_index)
        if min_web > 1:
            # bucket for features that were filtered out by the threshold
            dict_feature_index['Others'] = len(dict_feature_index)

        return dict_token_features, dict_feature_index

    @staticmethod
    def websites_to_vec_with_dict(dict_token_features, dict_feature_index, websites, add):
        """Vectorize ``websites`` using a previously built vocabulary.

        Args:
            dict_token_features: token -> list of n-gram features (from training).
            dict_feature_index: feature -> column index (from training).
            websites: iterable of website URLs to vectorize.
            add: if True, cells hold counts; otherwise 0/1 flags.

        Returns:
            int ndarray of shape (len(websites), len(dict_feature_index));
            features missing from the index fall into the last ('Others') column.
        """
        feature_length = len(dict_feature_index)
        websites_vec = np.zeros([len(websites), feature_length], dtype=int)
        others_col = feature_length - 1  # fallback column for unknown features
        for row, website in enumerate(websites):
            for token in Parser.UrlParser(website).names:
                # tokens unseen in training contribute nothing
                for feature in dict_token_features.get(token, ()):
                    col = dict_feature_index.get(feature, others_col)
                    if add:
                        websites_vec[row, col] += 1
                    else:
                        websites_vec[row, col] = 1
        return websites_vec

    @staticmethod
    def websites_to_feature(websites, n1, n2, min_web, add):
        """Vectorize every split of ``websites`` with a training-set vocabulary.

        Args:
            websites: dict with 'train', 'test' and optionally 'dev' splits.
            n1, n2, min_web, add: see ``train_websites_to_feature_dict`` /
                ``websites_to_vec_with_dict``.

        Returns:
            dict mapping each split name to its feature matrix.
        """
        dict_token_features, dict_feature_index = \
            Data2Feature.train_websites_to_feature_dict(websites['train'], n1, n2, min_web)
        features = {
            split: Data2Feature.websites_to_vec_with_dict(
                dict_token_features, dict_feature_index, websites[split], add)
            for split in websites  # covers 'train', 'test' and optional 'dev'
        }
        return features

    @staticmethod
    def train_titles_to_feature_dict(train_titles, cutmethod, stopwords, min_title):
        """Build the title feature vocabulary from the training split.

        Args:
            train_titles: iterable of titles from the training set.
            cutmethod: 'char' or 'parse' (see ``_cut_title``).
            stopwords: if True, skip segments judged to be stopwords.
            min_title: minimum total count for a segment to receive a column.

        Returns:
            dict mapping each retained segment to its column index, plus a
            trailing 'Others' column for everything else.

        Raises:
            ValueError: if ``min_title`` < 1 or ``cutmethod`` is invalid.
        """
        dict_seg_count = {}  # segment -> total count over training set

        for title in train_titles:
            if len(title) <= 1:
                continue  # single-character / empty titles carry no signal
            for seg in Data2Feature._cut_title(title, cutmethod):
                if stopwords and Stopwords.justify_stop_words(seg):
                    continue  # drop punctuation and other stopwords
                dict_seg_count[seg] = dict_seg_count.get(seg, 0) + 1

        if min_title < 1:
            raise ValueError('min_title should be at least one.')

        dict_seg_index = {}
        for seg, count in dict_seg_count.items():
            if count >= min_title:
                dict_seg_index[seg] = len(dict_seg_index)
        dict_seg_index['Others'] = len(dict_seg_index)  # fallback column
        return dict_seg_index

    @staticmethod
    def titles_to_vec_with_dict(dict_seg_index, titles, cutmethod, add):
        """Vectorize ``titles`` using a previously built vocabulary.

        Args:
            dict_seg_index: segment -> column index (from training).
            titles: iterable of titles to vectorize.
            cutmethod: 'char' or 'parse' (see ``_cut_title``).
            add: if True, cells hold counts; otherwise 0/1 flags.

        Returns:
            int ndarray of shape (len(titles), len(dict_seg_index)); segments
            missing from the index fall into the last ('Others') column.

        Raises:
            ValueError: if ``cutmethod`` is invalid.
        """
        feature_length = len(dict_seg_index)
        titles_vec = np.zeros([len(titles), feature_length], dtype=int)
        others_col = feature_length - 1  # fallback column for unknown segments
        for row, title in enumerate(titles):
            # threshold unified with train_titles_to_feature_dict (was > 2,
            # which silently zeroed out 2-character titles at inference time)
            if len(title) <= 1:
                continue
            for seg in Data2Feature._cut_title(title, cutmethod):
                col = dict_seg_index.get(seg, others_col)
                if add:
                    titles_vec[row, col] += 1
                else:
                    titles_vec[row, col] = 1
        return titles_vec

    @staticmethod
    def titles_to_feature(titles, cutmethod, min_title, add, stopwords):
        """Vectorize every split of ``titles`` with a training-set vocabulary.

        Args:
            titles: dict with 'train', 'test' and optionally 'dev' splits.
            cutmethod, min_title, add, stopwords: see
                ``train_titles_to_feature_dict`` / ``titles_to_vec_with_dict``.

        Returns:
            dict mapping each split name to its feature matrix.
        """
        dict_seg_index = Data2Feature.train_titles_to_feature_dict(
            titles['train'], cutmethod, stopwords, min_title)
        features = {
            split: Data2Feature.titles_to_vec_with_dict(
                dict_seg_index, titles[split], cutmethod, add)
            for split in titles  # covers 'train', 'test' and optional 'dev'
        }
        return features

    @staticmethod
    def data_to_feature(X, n1, n2, website=True, title=False, cutmethod='char',
                        add=True, stopwords=True, min_web=2, min_title=2):
        """Extract features from ``X`` per split; see ``__init__`` for parameters.

        Returns:
            dict mapping each split name to its feature matrix.  When both
            ``website`` and ``title`` are enabled, the two matrices are
            concatenated column-wise.

        Raises:
            ValueError: if neither ``website`` nor ``title`` is True (the
              original code crashed with a NameError in that case).
        """
        if not website and not title:
            raise ValueError('At least one of website and title must be True.')

        splits = list(X)  # 'train', 'test' and optional 'dev'
        features_websites = None
        features_titles = None

        if website:
            # record[0] is the website field
            websites = {split: [record[0] for record in X[split]] for split in splits}
            features_websites = Data2Feature.websites_to_feature(websites, n1, n2, min_web, add)

        if title:
            # record[1] is the title field
            titles = {split: [record[1] for record in X[split]] for split in splits}
            features_titles = Data2Feature.titles_to_feature(titles, cutmethod, min_title, add, stopwords)

        if website and title:
            # combine website and title features side by side
            return {key: np.concatenate((features_websites[key], features_titles[key]), axis=1)
                    for key in features_websites}
        return features_websites if website else features_titles

