# -*- coding: utf-8 -*-
"""
Created on Fri Oct 15 08:30:02 2021

@author: joker
"""

# LDA classifier experiment
# Load the SMS spam dataset
import pandas as pd 
from nlpia.data.loaders import get_data 
# Widen the display so the long message texts are readable
pd.options.display.width = 120 
sms = get_data('sms-spam') 
# Mark spam messages by appending '!' characters to their index labels
index = []
for i, is_spam in enumerate(sms.spam):
    index.append('sms{}{}'.format(i, '!' * is_spam))
sms = pd.DataFrame(sms.values, columns=sms.columns, index=index) 
sms['spam'] = sms.spam.astype(int) 
# Inspect the data
len(sms)
sms.spam.sum() 
sms.head(6)
# TF-IDF vectors
from sklearn.feature_extraction.text import TfidfVectorizer 
from nltk.tokenize.casual import casual_tokenize 
# casual_tokenize copes with the informal punctuation/emoticons in SMS text
tfidf_model = TfidfVectorizer(tokenizer=casual_tokenize) 
# Fit the vocabulary and produce one dense TF-IDF row per message
sparse_tfidf = tfidf_model.fit_transform(raw_documents=sms.text)
tfidf_docs = sparse_tfidf.toarray()
tfidf_docs.shape 
sms.spam.sum() 
# Compute the centroid of the spam class and of the ham class
mask = sms.spam.astype(bool).values 
spam_centroid = tfidf_docs[mask].mean(axis=0) 
ham_centroid = tfidf_docs[~mask].mean(axis=0) 
# Inspect the centroid vectors
spam_centroid.round(2) 
ham_centroid.round(2)
# The vector between the two centroids gives the classification direction;
# projecting each document onto it yields a "spamminess" score
centroid_axis = spam_centroid - ham_centroid
spamminess_score = tfidf_docs.dot(centroid_axis)
spamminess_score.round(2) 
# Plot a 2-D scatter of the documents in principal-component space
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
# Project the TF-IDF vectors onto 3 components (the third is used later in 3-D)
pca_model = PCA(n_components=3)
tfidf_docs_3d = pca_model.fit_transform(tfidf_docs)
df = pd.DataFrame(tfidf_docs_3d)
# Ham in green, spam in red, on the first two components
ax = df[~mask].plot(x=0, y=1, kind='scatter', alpha=.5, c='green')
df[mask].plot(x=0, y=1, ax=ax, alpha=.1, kind='scatter', c='red')
plt.xlabel(' x')
plt.ylabel(' y')
plt.savefig('spam_lda_2d_scatter.png')
# Plot an interactive 3-D scatter with plotly
import plotly as py

def _scatter3d_trace(points_mask, colour):
    # Build one scatter3d trace for the rows selected by points_mask
    return dict(
        x=df[0][points_mask], y=df[1][points_mask], z=df[2][points_mask],
        type="scatter3d", mode='markers',
        marker=dict(size=3, color=colour, line=dict(width=0)),
    )

spam_trace = _scatter3d_trace(mask, 'red')
ham_trace = _scatter3d_trace(~mask, 'green')
fig = dict(data=[ham_trace, spam_trace], layout={'title': 'LDA Spamminess Model'})
py.offline.plot(fig, filename='lda_spam_3d_scatter.html')
# "Train" the LDA-style classifier: rescale the spamminess scores to [0, 1]
from sklearn.preprocessing import MinMaxScaler 
sms['lda_score'] = MinMaxScaler().fit_transform(spamminess_score.reshape(-1, 1))
# Classify with a 50% threshold
sms['lda_predict'] = (sms.lda_score > 0.5).astype(int)
# Inspect the predictions next to the ground truth
sms[['spam', 'lda_predict', 'lda_score']].round(2).head(6)
# Accuracy = 1 - mean absolute error of the 0/1 predictions
(1.0 - (sms.spam - sms.lda_predict).abs().sum() / len(sms)).round(3)
# Confusion matrix
from pugnlp.stats import Confusion 
Confusion(sms[['spam', 'lda_predict']])


# LSA experiment
# LSA topic-term matrix
import _locale

def _utf8_default_locale(*_args):
    # HACK: force the default locale so nlpia's example loaders read their
    # bundled files as UTF-8 regardless of the platform locale
    return ['en_US', 'utf8']

_locale._getdefaultlocale = _utf8_default_locale
from nlpia.book.examples.ch04_catdog_lsa_3x6x16 import word_topic_vectors 
# Inspect the data
word_topic_vectors.T.round(1)
# Build the term-document matrices via lsa_models()
from nlpia.book.examples.ch04_catdog_lsa_sorted import lsa_models, prettify_tdm 
bow_svd, tfidf_svd = lsa_models() 
prettify_tdm(**bow_svd)
# Inspect the results: full SVD of the term-document matrix
tdm = bow_svd['tdm']
# Left singular vectors U (term-topic matrix)
import numpy as np 
U, s, Vt = np.linalg.svd(tdm) 
import pandas as pd 
pd.DataFrame(U, index=tdm.index).round(2)
# Singular values, arranged on the diagonal of the (rectangular) matrix S
s.round(1) 
S = np.zeros((len(U), len(Vt))) 
# FIX: `pd.np` was a deprecated alias removed in pandas 2.0 — call numpy directly
np.fill_diagonal(S, s)
pd.DataFrame(S).round(1) 
# Right singular vectors V^T (topic-document matrix)
pd.DataFrame(Vt).round(2) 
# Reconstruction error of the term-document matrix as singular values are
# dropped one at a time (smallest first)
err = [] 
for numdim in range(len(s), 0, -1): 
    # Zero out the smallest remaining singular value
    S[numdim - 1, numdim - 1] = 0 
    reconstructed_tdm = U.dot(S).dot(Vt) 
    # RMSE between the truncated reconstruction and the original matrix.
    # FIX: `np.product` was a deprecated alias removed in NumPy 2.0 — use np.prod
    err.append(np.sqrt(((reconstructed_tdm - tdm).values.flatten() ** 2).sum() 
        / np.prod(tdm.shape)))
np.array(err).round(2)


# PCA experiment
# PCA on 3-D vectors
import pandas as pd 
# Narrow the column display so output fits the page
pd.set_option('display.max_columns', 6) 
from sklearn.decomposition import PCA 
from matplotlib import pyplot as plt 
from nlpia.data.loaders import get_data 
df = get_data('pointcloud').sample(1000) 
# Project the 3-D point cloud onto its two strongest components
pca = PCA(n_components=2) 
projected = pca.fit_transform(df)
df2d = pd.DataFrame(projected, columns=['x', 'y'])
df2d.plot(kind='scatter', x='x', y='y') 
# Show the projection
plt.show() 
# Compute TF-IDF vectors for the SMS corpus
import pandas as pd 
from nlpia.data.loaders import get_data 
pd.options.display.width = 120 
# Load the SMS spam dataset
sms = get_data('sms-spam') 
# Append '!' to the index labels of spam messages so they stand out
index = ['sms{}{}'.format(i, '!' * j) for i, j in enumerate(sms.spam)]
sms.index = index 
# Inspect the data
sms.head(6)
# One TF-IDF vector per message
from sklearn.feature_extraction.text import TfidfVectorizer 
from nltk.tokenize.casual import casual_tokenize 
tfidf = TfidfVectorizer(tokenizer=casual_tokenize) 
tfidf_docs = tfidf.fit_transform(raw_documents=sms.text).toarray() 
len(tfidf.vocabulary_) 
tfidf_docs = pd.DataFrame(tfidf_docs) 
# Center the vectors by subtracting the column means (required before PCA)
tfidf_docs = tfidf_docs - tfidf_docs.mean() 
tfidf_docs.shape 
# Number of spam messages
sms.spam.sum() 
# Semantic analysis of the messages with PCA
from sklearn.decomposition import PCA 
# Reduce the 9232-D TF-IDF vectors to 16 topic dimensions
pca = PCA(n_components=16) 
pca = pca.fit(tfidf_docs) 
pca_topic_vectors = pca.transform(tfidf_docs) 
columns = ['topic{}'.format(i) for i in range(pca.n_components)] 
pca_topic_vectors = pd.DataFrame(pca_topic_vectors, columns=columns, index=index)
# Inspect the topic vectors
pca_topic_vectors.round(3).head(6) 
tfidf.vocabulary_
# Recover the vocabulary terms ordered by their TF-IDF column number
pairs = sorted((num, term) for term, num in tfidf.vocabulary_.items())
column_nums, terms = zip(*pairs)
terms
# Per-topic weight of every vocabulary term
weights = pd.DataFrame(pca.components_, columns=terms,
                       index=['topic{}'.format(i) for i in range(16)])
pd.options.display.max_columns = 8 
weights.head(4).round(3) 
# Look at tokens typical of spammy "deals" (weights scaled by 100)
pd.options.display.max_columns = 12 
deals = weights['! ;) :) half off free crazy deal only $ 80 %'.split()].round(3) * 100
# Inspect the weights
deals
deals.T.sum()
# Semantic analysis with truncated SVD
from sklearn.decomposition import TruncatedSVD 
# 16 topics, 100 iterations of the randomized solver
svd = TruncatedSVD(n_components=16, n_iter=100) 
svd_topic_vectors = svd.fit_transform(tfidf_docs.values) 
svd_topic_vectors = pd.DataFrame(svd_topic_vectors, columns=columns, index=index)
# Inspect the topic vectors
svd_topic_vectors.round(3).head(6)
# Spam classification with LSA topic vectors
import numpy as np 
# L2-normalise every row so that dot products equal cosine similarities
row_norms = np.linalg.norm(svd_topic_vectors, axis=1)
svd_topic_vectors = (svd_topic_vectors.T / row_norms).T
# Pairwise cosine similarity of the first ten messages
svd_topic_vectors.iloc[:10].dot(svd_topic_vectors.iloc[:10].T).round(1) 


# LDiA experiment
# Semantic analysis of the messages with an LDiA topic model
from sklearn.feature_extraction.text import CountVectorizer 
from nltk.tokenize import casual_tokenize 
np.random.seed(42)
# Bag-of-words counts, one row per message
counter = CountVectorizer(tokenizer=casual_tokenize) 
bow_docs = pd.DataFrame(counter.fit_transform(raw_documents=sms.text).toarray(), index=index) 
# Name the columns with vocabulary terms ordered by column number
vocab_pairs = sorted((num, term) for term, num in counter.vocabulary_.items())
column_nums, terms = zip(*vocab_pairs)
bow_docs.columns = terms
# Fit a 16-topic LDiA model on the counts
from sklearn.decomposition import LatentDirichletAllocation as LDiA 
ldia = LDiA(n_components=16, learning_method='batch') 
ldia = ldia.fit(bow_docs) 
# Inspect the topic-term matrix shape
ldia.components_.shape
# Make the wide output prettier
pd.set_option('display.width', 75) 
components = pd.DataFrame(ldia.components_.T, index=terms, columns=columns)
components.round(2).head(3)
# Strongest terms for topic3
components.topic3.sort_values(ascending=False)[:10] 
# LDiA topic vectors for every document
ldia16_topic_vectors = ldia.transform(bow_docs) 
ldia16_topic_vectors = pd.DataFrame(ldia16_topic_vectors, index=index, columns=columns)
# Inspect the topic vectors
ldia16_topic_vectors.round(2).head()
# Spam filter: LDiA topic vectors fed into an LDA classifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA 
from sklearn.model_selection import train_test_split
# 50/50 train/test split; fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(
    ldia16_topic_vectors, sms.spam, test_size=0.5, random_state=271828)
lda = LDA(n_components=1) 
lda = lda.fit(X_train, y_train) 
# Predict on the full corpus and keep the labels alongside the data
sms['ldia16_spam'] = lda.predict(ldia16_topic_vectors) 
# Held-out accuracy
round(float(lda.score(X_test, y_test)), 2) 
# Same pipeline with a 32-topic LDiA model
ldia32 = LDiA(n_components=32, learning_method='batch') 
ldia32 = ldia32.fit(bow_docs) 
ldia32.components_.shape 
# 32-D topic vector for every document
ldia32_topic_vectors = ldia32.transform(bow_docs) 
columns32 = ['topic{}'.format(i) for i in range(ldia32.n_components)] 
ldia32_topic_vectors = pd.DataFrame(ldia32_topic_vectors, index=index, columns=columns32)
ldia32_topic_vectors.round(2).head()
# Train an LDA classifier on the 32-topic vectors
X_train, X_test, y_train, y_test = train_test_split(
    ldia32_topic_vectors, sms.spam, test_size=0.5, random_state=271828)
lda = LDA(n_components=1) 
lda = lda.fit(X_train, y_train) 
sms['ldia32_spam'] = lda.predict(ldia32_topic_vectors) 
# Sanity-check the training matrix shape
X_train.shape 
# Training vs held-out accuracy
round(float(lda.score(X_train, y_train)), 3) 
round(float(lda.score(X_test, y_test)), 3)











