from pyspark.sql.functions import *
from pyspark.sql.session import SparkSession
from pyspark.sql.types import Row
import re

# Build the SparkSession (SQL execution entry point) for this job.
spark = SparkSession.builder.appName("rdd").getOrCreate()

# Grab the underlying SparkContext for RDD-level operations.
sc = spark.sparkContext

# 1. Load the raw NameNode log file as an RDD of lines.
lines_rdd = sc.textFile("../../data/hadoop-root-namenode-master.log")


# Pre-compiled log-line pattern: timestamp, level, logger class, message.
# Raw string avoids invalid-escape-sequence warnings for `\d` on modern
# Python, and compiling once hoists the pattern lookup out of the
# per-line hot path.
_LOG_PATTERN = re.compile(
    r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) ([A-Z]{2,6}) (.*?): (.*)"
)


def map_log(line):
    """Parse one log line into (log_time, log_type, log_class, log_info).

    Returns a 4-tuple of strings on success. Lines that do not match the
    expected format (e.g. continuation lines of a stack trace) yield
    (None, None, None, None) so downstream code can filter them out.
    """
    match = _LOG_PATTERN.match(line)
    if not match:
        return None, None, None, None
    # groups() is exactly the four captured fields, in order.
    return match.groups()


# 1. Parse every raw line into a (time, type, class, info) tuple.
logs_rdd = lines_rdd.map(map_log)

# Drop the lines the parser could not handle (all-None tuples).
filter_rdd = logs_rdd.filter(lambda record: record[0] is not None)

# 2. Convert the cleaned RDD into a DataFrame and aggregate with the DSL.
lines_df = spark.createDataFrame(
    filter_rdd, ["log_time", "log_type", "log_class", "log_info"]
)

# Show the number of log entries per severity level.
lines_df.groupBy("log_type").agg(count("*").alias("num")).show()
