# -*- coding: utf-8 -*-

import preprocess.url_parser as Parser
import preprocess.gram as Gram
import preprocess.stopwords as StopWords
import jieba

# Module-level stopword filter, shared by all ExtractFeature() calls.
# NOTE(review): constructed at import time — presumably loads a stopword
# list from disk inside StopWords(); verify there are no heavy side effects.
sw = StopWords.StopWords()

def is_chinese(uchar):
    """Return True if *uchar* falls in the CJK Unified Ideographs
    basic block (U+4E00 .. U+9FA5), i.e. is a common Chinese character."""
    return 0x4e00 <= ord(uchar) <= 0x9fa5

def ExtractFeature(X):
    """Turn a sequence of URLs into per-URL feature sets.

    For each URL, the name components (from Parser.UrlParser) are
    segmented with jieba, stopword-filtered, and mapped to features:
      - tokens starting with a Chinese character are kept whole;
      - pure-alphabetic tokens are expanded into letter 3-grams;
      - everything else is kept as-is.
        TODO: split such tokens on "_-+" and re-segment Chinese parts.

    Args:
        X: iterable of URL strings.

    Returns:
        list of set[str], one feature set per input URL.
    """
    X_gram = []
    for url in X:
        names = Parser.UrlParser(url).names
        f = set()

        for name in names:
            words = jieba.cut(name)
            words = set(words)
            words = sw.filterSet(words)
            for word in words:
                # Guard: segmentation/filtering may yield empty tokens;
                # word[0] below would raise IndexError on "".
                if not word:
                    continue
                if is_chinese(word[0]):
                    f.add(word)
                elif word.isalpha():
                    g = Gram.letter_n_gram(word, 3)
                    f.update(g)
                else:
                    f.add(word)
        X_gram.append(f)

    return X_gram

