import findspark
from pyspark.sql import SparkSession
import happybase
import jieba
from txdpy import get_chinese
from pyspark.ml.feature import Word2Vec
from pyspark.ml.feature import CountVectorizer
from pyspark.ml.feature import IDF
from pyspark.ml.feature import CountVectorizerModel

# Point findspark at the local Spark installation and the venv interpreter.
# NOTE(review): findspark.init() conventionally runs before `import pyspark`;
# this only works here because pyspark is already importable — confirm.
findspark.init(
    spark_home='/usr/local/spark/',
    python_path='/home/master/LoadData/venv/bin/python',
)

# Local-mode session using every available core.
spark = (
    SparkSession.builder
    .appName('SparkHBaseRDD')
    .master('local[*]')
    .getOrCreate()
)
sc = spark.sparkContext

print('加载数据')

# Scan every row of the HBase `news` table and build one record per row:
#   id    — the row key, decoded as UTF-8
#   title — jieba full-mode tokens of the Chinese characters in info:title
# get_chinese() keeps only Chinese characters, so the tokenizer sees clean
# text; cut_all=True yields jieba's full (overlapping) segmentation.
connection = happybase.Connection('master')
try:
    table = connection.table('news')
    data_list = []
    for row_key, row in table.scan():
        title_cn = ''.join(get_chinese(row[b"info:title"].decode('utf-8')))
        data_list.append({
            "id": row_key.decode('utf-8'),
            "title": list(jieba.cut(title_cn, cut_all=True)),
        })
finally:
    # Bug fix: the connection was never closed, leaking the HBase socket.
    connection.close()

df = spark.createDataFrame(data_list)

print('Word2Vec 词袋模型训练')

# Train a 64-dimensional Word2Vec embedding over the tokenized titles;
# tokens seen fewer than 3 times are ignored. Persist the fitted model,
# overwriting any previous save at the same path.
word2vec = Word2Vec(
    inputCol='title',
    outputCol='vector',
    vectorSize=64,
    minCount=3,
)
w2v_fitted = word2vec.fit(df)
w2v_fitted.write().overwrite().save("models/word2vec_model/python.word2vec")

print('CountVectorizer 模型训练')

# Fit a bag-of-words vocabulary (up to 2,000,000 terms, minDF=1.0) over
# the tokenized titles and persist the fitted model.
cv = CountVectorizer(inputCol="title", outputCol="countFeatures",
                     vocabSize=200 * 10000, minDF=1.0)
cv_model = cv.fit(df)
cv_model.write().overwrite().save("models/CV.model")

# Fix: transform with the already-fitted model instead of saving it and
# immediately reloading it from disk — the round trip was redundant and
# only added I/O.
cv_result = cv_model.transform(df)

print('IDF 模型训练')

# Fit inverse-document-frequency weights on top of the term-count vectors
# and persist the resulting model, overwriting any previous save.
idf_estimator = IDF(inputCol="countFeatures", outputCol="idfFeatures")
idf_estimator.fit(cv_result).write().overwrite().save("models/IDF.model")

print('训练完成')
