import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import java.util.Properties
//LMG
object WordCount {
  /**
   * Entry point: reads a text file from HDFS, counts word occurrences,
   * and writes the (word, total_count) result into a MySQL table.
   */
  def main(args: Array[String]): Unit = {
    // Build a SparkSession running locally on all available cores.
    val spark = SparkSession.builder()
      .appName("Word Count")
      .master("local[*]")
      .getOrCreate()

    // Required for .toDF on the (word, count) tuples below.
    import spark.implicits._

    // Read the input TXT file from HDFS, one line per Dataset element.
    val textFile = spark.read.textFile("hdfs://192.168.72.130:9000/user/lmg/10.9.txt")

    // Tokenize each line on runs of whitespace. Drop empty tokens
    // (split("\\s+") emits a leading "" for lines that start with
    // whitespace, which would otherwise be counted as a word), then
    // pair every word with an initial count of 1.
    val words = textFile.flatMap(line => line.split("\\s+"))
      .filter(_.nonEmpty)
      .map(word => (word, 1))
      .toDF("word", "count")

    // Aggregate the per-word totals.
    val counts = words.groupBy("word")
      .agg(sum("count").as("total_count"))

    // JDBC connection properties.
    // SECURITY NOTE(review): credentials are hard-coded in source; move
    // them to configuration (args/env vars) before committing or sharing.
    val prop = new Properties()
    prop.setProperty("driver", "com.mysql.cj.jdbc.Driver")
    val url = "jdbc:mysql://127.0.0.1:3306/MySparkCount"
    prop.setProperty("user", "root")
    prop.setProperty("password", "MYsql323240")

    // Overwrite the target table with the fresh counts, then shut down.
    counts.write.mode("overwrite").jdbc(url, "lmg_analysis", prop)
    spark.stop()
  }
}