# coding:utf8

import jieba
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType, StringType, StructType

# 4. Query the top-5 search keywords (wan)
if __name__ == '__main__':
    # Build (or reuse) the Spark session for this job and grab its SparkContext
    # for the RDD-level transformations below.
    spark = (
        SparkSession.builder
        .appName("xixi343")
        .getOrCreate()
    )
    sc = spark.sparkContext

    def context_jieba(data):
        """Tokenize *data* with jieba's search-engine mode; return the tokens as a list."""
        return list(jieba.cut_for_search(data))
    def append_words(data):
        """Map a known Chinese keyword to its English label and pair it with a count of 1.

        Words without a translation pass through unchanged. The ``(word, 1)``
        pair feeds the downstream ``reduceByKey`` word count.
        """
        # Dict lookup replaces the original repetitive if-chain; same mappings.
        translations = {
            '地震': 'Earthquake',
            '救灾': 'Disaster relief',
            '物资': 'Materials',
            '救灾物资': 'Disaster relief materials',
            '哄抢': 'Plunder',
        }
        return (translations.get(data, data), 1)
    def filter_words(data):
        """Return True when *data* is a token worth counting (not punctuation or a stop word)."""
        stop_tokens = ('+', '.', '的')
        return data not in stop_tokens
    # Read the raw log, tokenize the search-keyword column (index 2 of the
    # tab-separated line), count each word, and keep the 5 most frequent
    # (word, count) pairs. take(5) returns a plain Python list, not an RDD.
    top5 = (
        sc.textFile("hdfs://node1:8020/input/1.csv")
        .map(lambda line: line.split("\t"))
        .map(lambda fields: fields[2])
        .flatMap(context_jieba)
        .filter(filter_words)
        .map(append_words)
        .reduceByKey(lambda a, b: a + b)
        .sortBy(lambda kv: kv[1], ascending=False, numPartitions=1)
        .take(5)
    )
    # FIX: the counts produced by reduceByKey are ints, so "num" must be
    # IntegerType — createDataFrame's schema verification rejects int values
    # for a StringType field.
    schema = StructType(). \
        add("contain", StringType(), nullable=True). \
        add("num", IntegerType(), nullable=True)
    df = spark.createDataFrame(top5, schema=schema)
    df.show()
    # Persist the result to MySQL, replacing any rows from a previous run.
    (
        df.write.mode("overwrite")
        .format("jdbc")
        .option("url", "jdbc:mysql://node1:3306/bigdata?useSSL=false&useUnicode=true")
        .option("dbtable", "hot_word")
        .option("user", "root")
        .option("password", "123456")
        .save()
    )
    # Release cluster resources once the job is done.
    spark.stop()


