import json
import findspark
findspark.init()
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, when, lit, length
from udf import agent_info_udf, ip_info_udf, ismatch_udf

from elasticsearch import helpers, Elasticsearch


def extract_data(spark, path="../data/logstash.100.json"):
    """Read raw logstash JSON documents into a DataFrame.

    :param spark: active SparkSession used for reading
    :param path: JSON source path; defaults to the local sample dump
                 (generalized from a hard-coded constant)
    :return: DataFrame of raw documents (one row per JSON object)
    """
    # TODO: load real obs
    return spark.read.json(path)


def transform_data(df):
    """Flatten raw logstash documents and derive the analytics columns.

    Flattens the nested ``_source`` struct, enriches each row with
    user-agent and IP info (project UDFs), flags .iso/.rpm downloads and
    internal paths, strips a leading ``www.`` from the vhost, and projects
    the final output schema.

    :param df: raw DataFrame with a nested ``_source`` struct column
    :return: flattened, enriched DataFrame
    """
    df = df.select("_source.*").withColumn(
        "agent_info",
        agent_info_udf("http_user_agent")
    ).withColumn(
        "ip_info",
        ip_info_udf("remote_addr", "proxy_remote_addr")
    ).withColumn(
        # Flags are the strings '1'/'0' (not booleans) — kept as-is for
        # downstream (Elasticsearch) compatibility.
        "is_iso_download",
        when(col("path").contains(".iso"), '1').otherwise('0')
    ).withColumn(
        "is_rpm_download",
        when(col("path").contains(".rpm"), '1').otherwise('0')
    ).withColumn(
        "is_internal_path",
        ismatch_udf("path")
    ).withColumn(
        "hostname",
        # Remove 'www.' prefix. Bug fix: the original used .contains("www."),
        # which matched 'www.' anywhere in the vhost while substr always
        # stripped the first 4 characters; startswith matches the intent.
        when(col("vhost").startswith("www."),
             col("vhost").substr(lit(5), length("vhost") - 4)
             ).otherwise(col("vhost"))
    ).select(
        # Bug fix: 'path' and 'remote_addr' were each selected twice,
        # yielding duplicate column names (and duplicate JSON keys once
        # serialized with toJSON()).
        "path",
        "bytes_sent",
        "vhost",
        "proxy_remote_addr",
        "remote_addr",
        "request_query",
        "agent_info.*",
        "ip_info.*",
        "is_internal_path",
        "is_iso_download",
        "is_rpm_download",
        "hostname",
        col("@timestamp").alias("updated_at"),
        col("request_query").alias("link"),
        col("@timestamp").alias("created_at"),
        col("remote_addr").alias("location_ip")
    )
    return df


def load_data(df):
    """Bulk-index the transformed DataFrame into the 'spark_yikun' ES index.

    Deletes any pre-existing index of that name, then writes each
    partition's rows via the Elasticsearch bulk helper (the index is
    recreated implicitly on first write).

    NOTE(review): host, user and password are blank placeholders, and
    verify_certs=False disables TLS certificate verification — confirm
    this is intentional before production use.

    :param df: transformed DataFrame whose rows serialize to JSON documents
    """
    es = Elasticsearch([''],
                       http_auth=('', ''),
                       use_ssl=True, verify_certs=False)
    # Start from a clean slate so reruns don't mix old and new documents.
    if es.indices.exists(index="spark_yikun"):
        es.indices.delete(index="spark_yikun")
    # TODO: filter & repartition

    def f(rows):
        # Runs on the executors: the ES client is not serializable, so each
        # partition must open its own connection rather than reuse `es`.
        actions = []
        _es = Elasticsearch([''],
                            http_auth=('', ''),
                            use_ssl=True, verify_certs=False)
        for row in rows:
            # `rows` yields JSON strings (from df.toJSON()); decode to dicts
            # so helpers.bulk treats each as one index action.
            actions.append(json.loads(row))
        if actions:
            helpers.bulk(_es, actions, index="spark_yikun", doc_type="_doc")
        return True
    df.toJSON().foreachPartition(f)


def main():
    """Run the extract -> transform -> load pipeline on a local Spark."""
    session = (
        SparkSession.builder
        .master("local[1]")
        .appName("OM Log Washing")
        .getOrCreate()
    )

    raw = extract_data(session)
    load_data(transform_data(raw))


# Script entry point: run the pipeline only when executed directly.
if __name__ == "__main__":
    main()
