import findspark
findspark.init()
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, when


from pyspark.sql.types import StructType, StringType, StructField
from pyspark.sql.functions import udf



# Output schema for mock_ip_info_udf: every geo-IP lookup field is a
# nullable string.
# NOTE(review): the dotted names ("location.lat"/"location.lon") will need
# backtick-quoting if referenced individually after an `ip_info.*`
# expansion — confirm downstream consumers handle that.
_IP_FIELD_NAMES = [
    "ip",
    "location.lat",
    "location.lon",
    "country",
    "city",
    "region_name",
    "continent_name",
    "region_iso_code",
]
ip_schema = StructType(
    [StructField(field, StringType(), True) for field in _IP_FIELD_NAMES]
)


@udf(returnType=ip_schema)
def mock_ip_info_udf(mock):
    """Placeholder geo-IP lookup: return an empty string for each of the
    eight fields declared in ip_schema."""
    return [""] * 8


# Output schema for mock_agent_info_udf: parsed user-agent attributes,
# all nullable strings.
agent_schema = StructType([
    StructField(name, StringType(), True)
    for name in ("browser", "os", "equipment")
])


# Placeholder user-agent parser whose output matches agent_schema.
@udf(returnType=agent_schema)
def mock_agent_info_udf(mock):
    """Return an empty string for each agent_schema field
    (browser, os, equipment)."""
    return [""] * 3


def extract_data(spark, path="file:////home/penglei/logstash-2021.05.12-hk-app.json"):
    """Read a logstash JSON dump into a DataFrame.

    Args:
        spark: active SparkSession used to read the input.
        path: input file location. Defaults to the previously hard-coded
            path so existing callers are unaffected; pass another path
            (e.g. "./data/logstash.100.json") to read a different dump.

    Returns:
        DataFrame with one row per JSON log record.
    """
    return spark.read.json(path)

def transform_data(df):
    """Flatten the logstash `_source` payload, keep only traffic for
    repo.openeuler.org, and derive the columns needed for loading.

    Adds mock agent/ip info columns (via the placeholder UDFs), flags
    .iso/.rpm downloads, and projects the final column set.

    Args:
        df: raw DataFrame as produced by extract_data (must expose a
            `_source` struct with vhost, path, http_user_agent,
            remote_addr, etc.).

    Returns:
        Transformed DataFrame ready for load_data.
    """
    df = df.select("_source.*").filter(col("vhost") == "repo.openeuler.org").withColumn(
        # The redundant .alias(...) calls inside withColumn were dropped:
        # withColumn already assigns the column name, so alias was a no-op.
        "agent_info",
        mock_agent_info_udf("http_user_agent")
    ).withColumn(
        "ip_info",
        mock_ip_info_udf("remote_addr")
    ).withColumn(
        "is_iso_download",
        when(col("path").contains(".iso"), 1).otherwise(0)
    ).withColumn(
        "is_rpm_download",
        when(col("path").contains(".rpm"), 1).otherwise(0)
    ).select(
        # "path" and "remote_addr" were each projected twice in the original
        # select; the duplicates are removed so downstream references are
        # not ambiguous.
        "path",
        "bytes_sent",
        "vhost",
        "proxy_remote_addr",
        "remote_addr",
        "request_query",
        "agent_info.*",
        "ip_info.*",
        "is_iso_download",
        "is_rpm_download",
        col("@timestamp").alias("updated_at"),
        col("request_query").alias("link"),
        col("@timestamp").alias("created_at"),
        col("remote_addr").alias("location_ip"),
        col("vhost").alias("hostname"),
        # NOTE(review): "is_internal_path" sounds boolean but is just a copy
        # of the raw path — confirm the intended semantics with the consumer.
        col("path").alias("is_internal_path")
    )
    return df


def load_data(df, path="./data/out"):
    """Persist the transformed DataFrame as plain text files.

    Args:
        df: DataFrame to save.
        path: output directory. Defaults to the previously hard-coded
            location so existing callers are unaffected.

    NOTE(review): rdd.saveAsTextFile writes Row string representations and
    raises if the target directory already exists — consider
    df.write.mode(...).json(path) for structured, re-runnable output.
    """
    df.rdd.saveAsTextFile(path)


def main():
    """Run the extract -> transform -> load pipeline on a local
    single-core Spark session."""
    session = (
        SparkSession.builder
        .master("local[1]")
        .appName("OM Log Washing")
        .getOrCreate()
    )

    raw = extract_data(session)
    cleaned = transform_data(raw)
    load_data(cleaned)


# Script entry point: run the ETL pipeline only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
