# TF-IDF processing: convert a binary feature matrix into TF-IDF vectors and export to CSV.
import scipy.io as scio
import numpy as np
import csv
import datetime
from sklearn.feature_extraction.text import TfidfVectorizer


def readMat(matPath):
    """Load the MATLAB .mat file at *matPath* and return its contents as a dict."""
    contents = scio.loadmat(matPath)
    return contents


# --- Load the dataset ---------------------------------------------------
startTime = datetime.datetime.now()
matPath = '../dataSet20180417.mat'
dataSet = readMat(matPath)
print('数据集读取完成')

# Pull the label vector and the binary feature matrix out of the loaded
# MATLAB dict (np.array copies them into plain ndarrays).
labels = np.array(dataSet['labels'])
chara = np.array(dataSet['chara'])
print('数据集读取完成')

# Turn every binary feature row into a space-separated token string such as
# "feature0 feature3 " (one token per set bit, trailing space kept), so the
# rows can be fed to TfidfVectorizer as pseudo-documents.
charaStr = []
for rowIdx, row in enumerate(chara):
    tokens = "".join("feature%s " % col for col, flag in enumerate(row) if flag == 1)
    charaStr.append(tokens)
    if rowIdx % 10000 == 0:
        # Progress heartbeat every 10k rows.
        print("已经处理了%s条数据" % rowIdx)
endTime = datetime.datetime.now()
print("数据集转为文本完成，目前用时%s" % (endTime - startTime))

# --- TF-IDF vectorization ----------------------------------------------
# fit_transform on the same corpus is equivalent to fit() followed by
# transform(); countVec stays fitted for the later header export.
countVec = TfidfVectorizer(norm='l2', decode_error='strict', min_df=0)
charaVec = countVec.fit_transform(charaStr)
endTime = datetime.datetime.now()
print("TFIDF训练完成，目前用时%s" % (endTime - startTime))

# --- Persist the TF-IDF matrix as CSV -----------------------------------
# Two copies are written: one with a feature-name header row, one without.
csvPath1 = "TFIDF(with header).csv"
csvPath2 = "TFIDF(without header).csv"
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 —
# switch to get_feature_names_out() when upgrading sklearn.
fileHeader = list(countVec.get_feature_names())

# Context managers guarantee both files are closed even if a write fails
# (the originals leaked the handles on any exception before close()).
with open(csvPath1, 'w', newline='') as csvFile1, \
        open(csvPath2, 'w', newline='') as csvFile2:
    writer1 = csv.writer(csvFile1)
    writer2 = csv.writer(csvFile2)
    writer1.writerow(fileHeader)
    for charaVecItem in charaVec:
        # Densify each sparse row once instead of twice per iteration.
        denseRow = charaVecItem.toarray()[0]
        writer1.writerow(denseRow)
        writer2.writerow(denseRow)

endTime = datetime.datetime.now()
print("新数据集生成完成%s" % (endTime - startTime))