#coding=utf-8
import  sklearn as  skLearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.dict_vectorizer import DictVectorizer
from  sklearn.feature_extraction.text import TfidfVectorizer as tf
import sklearn.preprocessing as prep
import jieba as jb
import jieba.analyse as an
import  pandas as pd

# data=pd.read_csv('G:\学习教程\人工智能\测试数据文件夹\doc.txt')
# values=data.values
# datas=list()
# for item in  values:
#     str2=str.lstrip(item[0])
#     # Chinese word segmentation (jieba)
#     s2=jb.cut(str2.lstrip(),cut_all=False)
#     jbStr=" ".join(s2)
#     # print(jbStr)
#     jj=an.extract_tags(jbStr,topK=10,withWeight=True)
#     datas.append( jj)
#
# data2= (i for i in  datas)

# df=pd.DataFrame(data2)
# df1=df.fillna(0.0)
# numDatas=list()
# result2=list()
# for i in df1.values:
#     for j in i:
#       print(j[1])
#
#     result2=list(numDatas)


# Min-max normalization demo: rescale each feature column of the sample
# matrix into the [0, 1] range and print the result.
scaler = prep.MinMaxScaler()
samples = [
    [121, 0.2, 1, 232, 44],
    [33, 44, 3.3, 43, 434],
    [11, 2.3, 2, 34, 990],
]
# fit_transform learns per-column min/max, then scales in one step
guiyiDatas = scaler.fit_transform(samples)
print(guiyiDatas)
# tf=tf()
# result=tf.fit_transform(datas)
# print(result.toarray())


# print(datas)
# cv=CountVectorizer()
# data2=cv.fit_transform(datas)
# print(cv.get_feature_names())
# print(data2.toarray()[2])
# cv=DictVectorizer()
# data2=cv.fit_transform(values.tolist())
# print(cv.get_feature_names())
# print(data2)

