%spark.pyspark

'''
本案例收集网站新闻，分别有体育，女性，科技和娱乐主题的新闻报道，采用无监督的方式，区别出不同主题并提取出每个主题中的代表词
'''

from __future__ import print_function

import re
import string
from collections import Counter

import jieba
import jieba.posseg as pseg
import nltk
import numpy as np
import pandas as pd
from nltk.probability import FreqDist
from pyspark.ml.clustering import LDA
from pyspark.ml.feature import HashingTF, IDF
from pyspark.ml.linalg import Vectors,VectorUDT
from pyspark.sql import SparkSession
from pyspark.sql.functions import when,udf,col
from pyspark.sql.types import *

#加载停用词
def load_stopwords(path):
    f_stop = open(path)
    sw = [line.strip() for line in f_stop]
    f_stop.close()
    return sw

#分词
def cut_words(sentence,stopwords):
	#利用reg去除一些标点符号
	sentence = re.sub(r'\s+',' ',sentence)
	sentence = re.sub(r'\n+',' ',sentence)
	sentence = re.sub(r'\t+',' ',sentence)
	sentence = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——；！，”。《》，。：“？、~@#￥%……&*（）1234567①②③④)]+".\
                          decode("utf-8"), "".decode("utf-8"), sentence)
	#提取名词
	tmpwords=pseg.cut(sentence.strip())
	newsent=[]
	for wd in tmpwords:
		if wd.flag == 'x' and wd.word !=' ' and wd.word!='ns' and wd.word!='nr'\
					or re.match(r'^n',wd.flag)!=None and re.match(r'^nr', wd.flag) == None:
			newsent.append(wd.word)
	sentence="".join(newsent)
	#分词
	words = list(jieba.cut(sentence.strip()))
	#去停用词和单音节词
	wordlist = []
	for word in words:
		if word not in stopwords and len(word)>1:
			wordlist.append(word)
	return wordlist

#构建词频向量
def buildfeatures(wordslist,keywords): 
    features = []
    freq = FreqDist(wordslist)
    for word in keywords:
    	features.append(freq.get(word,0.0))
    return features

# 加载停用词表
stopwords = load_stopwords('/root/zeppelin_data/news/stopwords.txt')
# 读入语料库
# df = spark.read.text('/root/zeppelin_data/news/news_data.txt')
df = spark.read.csv("file:///mnt/sdb1/xinwen/xinwen3.csv",header=True,inferSchema=True)
print("语料库条数",df.count())
# print(df.printSchema())
# df.show(truncate=False)

#分词
split = udf(lambda sentence:cut_words(sentence,stopwords),ArrayType(elementType=StringType(),containsNull=False))
df = df.withColumn("words",split(df.content)).drop('content','title','id')
# df.show(truncate=False)
print('分词结束')

# 建立词典
keywords = []
collectwords = df.select("words").collect()
for line in collectwords:
    freq = FreqDist(line["words"])
    keywords += [k for k,v in freq.items() if v>2]
keywords = list(set(keywords))
print('所有文档的词种类数：',len(keywords))

build = udf(lambda line:Vectors.dense(buildfeatures(line,keywords)),VectorUDT())
TFdf = df.withColumn("TFfeatures",build(col("words")))
# TFdf.show(truncate=False)
print('TF计算结束')

idf = IDF(inputCol="TFfeatures", outputCol="features")
featuredf = idf.fit(TFdf).transform(TFdf).drop('TFfeatures')
# featuredf.show(truncate=False)
print('IDF计算结束')

'''
LDA
'''
print('开始LDA训练')
lda = LDA(k=3, seed=123, optimizer="em")
model = lda.fit(featuredf)
# print(model.isDistributed()) 
# print(model.vocabSize())
# print(model.topicsMatrix())
# model.describeTopics().show(truncate=False)

print('主题词分布：')
topicsdf = model.describeTopics().collect()
for i in xrange(len(topicsdf)):
    print('#',i,'的主题词分布：')
    print(','.join([keywords[j] for j in topicsdf[i]['termIndices']]))

resdf = model.transform(featuredf)
resdf.drop('features').show(truncate=False)