from sklearn.feature_extraction.text import CountVectorizer

# Toy corpus: whitespace-pre-tokenized mixed Chinese/Japanese/English documents.
X = ['我 爱 你 ！ あなた',
     '我 恨 你 恨 你 。漢字',
     'apple is fruit',
     'vehicle is matter']

# PITFALL: the commented-out pattern below contains U+00AD (SOFT HYPHEN),
# which renders exactly like the ASCII hyphen U+002D but is NOT a range
# operator inside a regex character class — so \u4e00-\u9fa5 would silently
# stop matching CJK ideographs as a range.
# count = CountVectorizer(token_pattern='[a-zA-Z\u4e00­\u9fa5]+')  # broken: the "-" is U+00AD
count = CountVectorizer(token_pattern='[a-zA-Z\u4e00-\u9fa5]+')  # correct: the "-" is U+002D

# Prove the two characters differ: prints "AD vs 2D".
print(f'{ord("­"):X} vs {ord("-"):X}')

x = count.fit_transform(X)
# .A was a deprecated alias for .toarray() on SciPy sparse matrices (removed
# for sparse arrays); use the supported toarray() and print the matrix once.
print(x.toarray())
# get_feature_names() was removed in scikit-learn 1.2; the replacement is
# get_feature_names_out().
print(count.get_feature_names_out())

from sklearn.feature_extraction.text import TfidfVectorizer

# Same character-class pattern as the CountVectorizer demo, but producing
# TF-IDF weights instead of raw counts (norm=None keeps them unnormalized).
ti = TfidfVectorizer(norm=None,
                     # token_pattern='[a-zA-Z\u4e00­\u9fa5]+',  # broken: the "-" is U+00AD (soft hyphen)
                     token_pattern='[a-zA-Z\u4e00-\u9fa5]+',  # correct: the "-" is U+002D
                     )
x = ti.fit_transform(X)
# get_feature_names() was removed in scikit-learn 1.2 — use get_feature_names_out().
print(ti.get_feature_names_out())
# .A is a deprecated sparse-matrix alias; toarray() is the supported API.
print(x.toarray())
