import numpy as np
import pandas as pd

from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer,HashingVectorizer,TfidfTransformer

# Demo corpora (mixed case and repeated words are intentional, to show how
# the vectorizers normalize case and weight term frequency):
#   arr2 — the corpus the vectorizers below are fitted on;
#   arr1 — only ever passed to transform() as "unseen" documents.
arr1 = [
    "This is spark, spark sql a every good",
    "Spark Hadoop Hbase",
    "This is sample",
    "This is anthor example anthor example",
    "spark hbase hadoop spark hive hbase hue oozie",
    "hue oozie spark"
]
arr2 = [
    "this is a sample a example",
    "this c c cd is another another sample example example",
    "spark Hbase hadoop Spark hive hbase"
]

# Fit a TF-IDF model on arr2, then apply the learned weighting to the
# unseen arr1 documents.
data = arr2
# min_df=1 keeps every term (document frequency is always >= 1); the former
# min_df=0 is rejected by scikit-learn's parameter validation for integers.
tfidf = TfidfVectorizer(min_df=1, dtype=np.float64)
data2 = tfidf.fit_transform(data)
print(data2.toarray())
# get_feature_names() was removed in scikit-learn 1.2;
# get_feature_names_out() is the replacement.
print(tfidf.get_feature_names_out())
print(tfidf.get_stop_words())
print('转换另外的文档数据')
print(tfidf.transform(arr1).toarray())

# Hash documents into a fixed 20-dimensional space (no vocabulary is stored,
# so there is no get_feature_names_out for this vectorizer).
# non_negative=True was removed in scikit-learn 0.21; alternate_sign=False is
# the replacement — it disables the sign-flipping hash so counts stay >= 0.
hashing = HashingVectorizer(n_features=20, alternate_sign=False, norm=None)
data3 = hashing.fit_transform(data)
print(data3.toarray())
print(hashing.get_stop_words())
print('转换另外的文档数据')
print(hashing.transform(arr1).toarray())

# Raw term counts; min_df=0.1 drops terms present in fewer than 10% of the
# fitted documents.
# ngram_range lower bound must be >= 1 — the former (0, 1) is invalid
# (an n-gram of length 0 is meaningless); (1, 1) gives the intended unigrams.
count = CountVectorizer(min_df=0.1, dtype=np.float64, ngram_range=(1, 1))
data4 = count.fit_transform(data)
print(data4.toarray())
# get_feature_names() removed in scikit-learn 1.2; use the *_out variant.
print(count.get_feature_names_out())
print('转换另外的文档数据')
print(count.transform(arr1).toarray())
print(data4)

# Re-weight the raw count matrix data4 with IDF learned from that same matrix,
# then apply the identical weighting to the held-out arr1 counts.
tfidf2 = TfidfTransformer()
tfidf2.fit(data4)
data5 = tfidf2.transform(data4)
print(data5.toarray())
print('转换另外的文档数据')
arr1_counts = count.transform(arr1)
print(tfidf2.transform(arr1_counts).toarray())


# Toy pre-tokenized corpus (classic "dog posts" NLP demo data): each inner
# list is one document, already split into word tokens. Used below to build
# the hand-rolled set-of-words and bag-of-words vectors.
dataset = [['my', 'dog', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
         ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
         ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
         ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
         ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
         ['quit','worthless', 'buying', 'worthless', 'dog', 'food', 'stupid']]

# Build the vocabulary as the union of all words across every document.
# NOTE: voc_list order comes from set iteration order, so the exact union
# operations are kept as-is — the column order of SOW/BOW depends on it.
voc_set = set()
for doc in dataset:
    voc_set |= set(doc)
voc_list = list(voc_set)

# Set-of-words model: a binary indicator per vocabulary word —
# 1 if the word occurs in the document at all, 0 otherwise.
SOW = []
for doc in dataset:
    SOW.append([1 if term in doc else 0 for term in voc_list])
          
# Bag-of-words model: each slot counts how many times the corresponding
# vocabulary word appears in the document.
BOW = []
for doc in dataset:
    BOW.append([doc.count(term) for term in voc_list])

print('词袋法以及词集法')
print(np.array(SOW))
print(np.array(BOW))