# -*- coding: utf-8 -*-
import preprocess.url_parser as Parser
import preprocess.gram as Gram
import numpy as np
import jieba

# ========================================================================================================
# =============================== Extract features from training set, dev set and test set separately ======
def train_websites_to_feature_dict(train_websites,n1,n2,least_feature_appear):
    """Build the character-n-gram feature vocabulary from the training URLs.

    For every distinct URL token (as produced by ``Parser.UrlParser(...).names``)
    collect its letter n-grams for all sizes in [n1, n2], then keep only the
    n-grams seen at least ``least_feature_appear`` times.

    Returns a pair ``(dict_token_features, dict_feature_index)``:
      * dict_token_features: token -> ordered list of its distinct n-grams
      * dict_feature_index:  surviving n-gram -> column index; when
        ``least_feature_appear > 1`` a trailing 'Others' bucket is appended
        for pruned n-grams.

    NOTE: n-gram counts are accumulated once per *distinct* token, not per
    token occurrence, and duplicates within one token are counted each time
    ``letter_n_gram`` yields them.
    """
    dict_token_features = {}
    dict_feature_count = {}
    dict_feature_index = {}

    for website in train_websites:
        for token in Parser.UrlParser(website).names:
            # Each distinct token is processed exactly once.
            if token in dict_token_features:
                continue
            token_features = []
            dict_token_features[token] = token_features
            for size in range(n1, n2 + 1):
                for feature in Gram.letter_n_gram(token, size):
                    if feature not in token_features:
                        token_features.append(feature)
                    # Count unconditionally (matches original behaviour).
                    dict_feature_count[feature] = dict_feature_count.get(feature, 0) + 1

    next_index = 0
    if least_feature_appear < 1:
        # Degenerate configuration: emit a warning and return an empty index.
        print ('least_feature_appear should be at least one.')
    else:
        for feature in dict_feature_count:
            if dict_feature_count[feature] >= least_feature_appear:
                dict_feature_index[feature] = next_index
                next_index += 1
    if least_feature_appear > 1:
        # Pruning happened, so reserve a catch-all column for rare n-grams.
        dict_feature_index['Others'] = next_index

    return dict_token_features, dict_feature_index

def websites_to_vec_with_dict(dict_token_features, dict_feature_index, websites,add):
    """Vectorize URLs against a previously built n-gram vocabulary.

    Each row of the returned int array corresponds to one URL; columns follow
    ``dict_feature_index``.  Tokens absent from ``dict_token_features`` are
    skipped; n-grams missing from ``dict_feature_index`` fall into the last
    column (the 'Others' bucket).  With ``add`` True the cells accumulate
    counts, otherwise they are binary presence flags.
    """
    n_features = len(dict_feature_index)
    websites_vec = np.zeros([len(websites), n_features], dtype=int)
    for row, website in enumerate(websites):
        for token in Parser.UrlParser(website).names:
            if token not in dict_token_features:
                continue
            for feature in dict_token_features[token]:
                # Unknown n-grams map onto the trailing 'Others' column.
                col = dict_feature_index.get(feature, n_features - 1)
                if add == True:
                    websites_vec[row, col] += 1
                else:
                    websites_vec[row, col] = 1
    return websites_vec
    
def websites_to_feature(websites,n1,n2,least_feature_appear,add):
    """Fit the URL n-gram vocabulary on the train split, then vectorize all splits.

    ``websites`` maps split names ('train', 'test', optionally 'dev') to URL
    lists; the returned dict maps the same names to feature matrices.
    """
    dict_token_features, dict_feature_index = train_websites_to_feature_dict(
        websites['train'], n1, n2, least_feature_appear)
    splits = ['train', 'test', 'dev'] if len(websites) == 3 else ['train', 'test']
    return {
        split: websites_to_vec_with_dict(dict_token_features, dict_feature_index,
                                         websites[split], add)
        for split in splits
    }

# =================================================Similar to Title: 
def justify_stop_words(char):
    """Return True if *char* should be kept as a title feature.

    Returns False for listed punctuation/stop characters, ASCII digits and
    ASCII letters; everything else (in particular CJK characters) is kept.
    """
    STOP_WORDS = [u'【',u'】',u'[',u']',u'(',u')',u'（',u'）',
                  u'“',u'”',u'！',u'-',u'.',
                  u':', u'|',u'! ',
                  u'\'',u';',u'；',u'_',u'，',u',',u'。',u'》',u'《',u'+',u'&',
                   u'网']
    if char in STOP_WORDS:
        return False
    # BUG FIX: the original compared ``char <= '10'`` — a lexicographic string
    # comparison under which '2'..'9' sort AFTER '10', so digits 2-9 slipped
    # through the filter.  Compare against '9' to reject all ASCII digits.
    # (The duplicate str/unicode branch was also removed: u'0' == '0' in py3.)
    elif '0' <= char <= '9':
        return False
    elif ('a' <= char <= 'z') or ('A' <= char <= 'Z'):
        return False
    else:
        return True
    
def train_titles_to_feature_dict(train_titles,char,least_feature_appear):
    """Build the title-segment vocabulary from the training titles.

    Titles of length <= 2 are skipped.  With ``char`` False the title is
    segmented by ``jieba.cut(..., cut_all=True)``; otherwise it is iterated
    character by character.  Segments rejected by ``justify_stop_words`` are
    dropped; surviving segments must appear at least ``least_feature_appear``
    times.  Returns ``dict_seg_index`` mapping segment -> column index, always
    ending with a trailing 'Others' bucket.
    """
    dict_seg_count = {}
    for title in train_titles:
        if len(title) <= 2:
            continue
        if char == False:
            segments = jieba.cut(title.strip(), cut_all=True)
        else:
            segments = title.strip()
        for seg in segments:
            if justify_stop_words(seg) == True:
                dict_seg_count[seg] = dict_seg_count.get(seg, 0) + 1

    dict_seg_index = {}
    next_index = 0
    if least_feature_appear < 1:
        # Degenerate configuration: warn and fall through with an empty index.
        print ('least_feature_appear should be at least one.')
    else:
        for seg, count in dict_seg_count.items():
            if count >= least_feature_appear:
                dict_seg_index[seg] = next_index
                next_index += 1
    # Unlike the website vocabulary, 'Others' is always appended here.
    dict_seg_index['Others'] = next_index
    return dict_seg_index
    
def titles_to_vec_with_dict(dict_seg_index, titles, char, add):
    """Vectorize titles against a previously built segment vocabulary.

    One row per title, columns per ``dict_seg_index``.  Titles of length <= 2
    yield all-zero rows.  Segments not in the vocabulary fall into the last
    column (the 'Others' bucket).  With ``add`` True the cells accumulate
    counts, otherwise they are binary presence flags.
    """
    n_features = len(dict_seg_index)
    titles_vec = np.zeros([len(titles), n_features], dtype=int)
    for row, title in enumerate(titles):
        if len(title) > 2:
            if char == False:
                segments = jieba.cut(title.strip(), cut_all=True)
            else:
                segments = title.strip()
            for seg in segments:
                # Unknown segments map onto the trailing 'Others' column.
                col = dict_seg_index.get(seg, n_features - 1)
                if add == True:
                    titles_vec[row, col] += 1
                else:
                    titles_vec[row, col] = 1
    return titles_vec

def titles_to_feature(titles,char,least_feature_appear,add):
    """Fit the title-segment vocabulary on the train split, then vectorize all splits.

    ``titles`` maps split names ('train', 'test', optionally 'dev') to title
    lists; the returned dict maps the same names to feature matrices.
    """
    dict_seg_index = train_titles_to_feature_dict(titles['train'], char,
                                                  least_feature_appear)
    splits = ['train', 'test', 'dev'] if len(titles) == 3 else ['train', 'test']
    return {
        split: titles_to_vec_with_dict(dict_seg_index, titles[split], char, add)
        for split in splits
    }

def data_to_feature(X,n1,n2,website=True,title=False,char=False,add=True,least_feature_appear_websites=2,least_feature_appear_titles=2):
    """Extract feature matrices from the raw dataset splits.

    ``X`` maps split names ('train', 'test', optionally 'dev') to lists of
    records where record[0] is the website URL and record[1] is the title.
    With ``website`` True, URL n-gram features (sizes n1..n2) are extracted;
    with ``title`` True, title-segment features are extracted (``char``
    selects char-level vs jieba segmentation).  When both are enabled the two
    matrices are concatenated column-wise per split.

    Raises ValueError when both ``website`` and ``title`` are False (the
    original code died with a confusing NameError in that case).
    """
    if website != True and title != True:
        raise ValueError('At least one of website or title must be True.')

    # (A stray bare `print` statement — a Python 2 leftover that printed a
    # blank line — was removed from the website branch.)
    if website == True:
        websites = {}
        websites['train'] = [record[0] for record in X['train']]
        websites['test'] = [record[0] for record in X['test']]
        if len(X) == 3:
            websites['dev'] = [record[0] for record in X['dev']]
        features_websites = websites_to_feature(websites, n1, n2,
                                                least_feature_appear_websites, add)

    if title == True:
        titles = {}
        titles['train'] = [record[1] for record in X['train']]
        titles['test'] = [record[1] for record in X['test']]
        if len(X) == 3:
            titles['dev'] = [record[1] for record in X['dev']]
        features_titles = titles_to_feature(titles, char,
                                            least_feature_appear_titles, add)

    if website == True and title == True:
        # Column-wise concatenation: URL features first, title features after.
        features = {}
        for key in features_websites:
            features[key] = np.concatenate((features_websites[key],
                                            features_titles[key]), axis=1)
    elif title == False:
        features = features_websites
    else:
        features = features_titles

    return features

# ==============================================================================================================