from datetime import datetime

from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, date_format, desc, hour, lit, when
from pyspark.sql.types import IntegerType, StringType, StructField, StructType


def now_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM'."""
    return datetime.now().strftime('%Y-%m-%d %H:%M')


def create_spark_session():
    """Build (or reuse) a SparkSession configured with the SQLite JDBC driver jar."""
    builder = SparkSession.builder
    builder = builder.appName("Douyin Data Analysis")
    builder = builder.config(
        "spark.jars",
        "/usr/local/spark/jars/sqlite-connector-java/sqlite-jdbc.jar",
    )
    return builder.getOrCreate()


def read_table(spark, db_path, table_name):
    """Load `table_name` from the SQLite database at `db_path` via JDBC."""
    jdbc_options = {
        "url": f"jdbc:sqlite:{db_path}",
        "driver": "org.sqlite.JDBC",
        "dbtable": table_name,
    }
    reader = spark.read.format("jdbc")
    for key, value in jdbc_options.items():
        reader = reader.option(key, value)
    return reader.load()


class FrequencyAnalysis:
    """Per-hour message frequency for one room on one day.

    Reads `analyzed_table` through JDBC, keeps rows matching `room_id`
    whose `create_time` falls on the day `analyze_time` ("yyyy-MM-dd"),
    and counts rows per hour of day.
    """

    def __init__(self, spark, db_path, room_id, analyzed_table, analyze_time):
        self.spark = spark                    # active SparkSession
        self.db_path = db_path                # path to the SQLite file
        self.room_id = room_id                # live-room id to filter on
        self.analyzed_table = analyzed_table  # source table name
        self.analyze_time = analyze_time      # day to analyze, "yyyy-MM-dd"

    def analyze(self):
        """Return a result dict; `frequency` is 24 comma-joined hourly counts."""
        # Read the table and keep only the requested room and day.
        df = read_table(self.spark, self.db_path, self.analyzed_table).filter(
            (col("room_id") == self.room_id) &
            (date_format(col("create_time"), "yyyy-MM-dd") == self.analyze_time)
        )

        # Count rows per hour of the day.
        result = (
            df.withColumn("hour", hour(col("create_time")))
            .groupBy("hour")
            .agg(count("id").alias("count"))
            .orderBy("hour")
        )

        # Fill a fixed 24-slot array; hours with no rows stay 0.
        frequency = [0] * 24
        for row in result.collect():
            # hour() yields NULL for unparseable create_time values; skip
            # such rows instead of crashing on frequency[None].
            if row["hour"] is not None:
                frequency[row["hour"]] = row["count"]

        return {
            "create_time": now_time(),
            "room_id": self.room_id,
            "analyzed_table": self.analyzed_table,
            "analyze_time": self.analyze_time,
            "frequency": ",".join(map(str, frequency))
        }


class GenderAnalysis:
    """Male/female percentage breakdown for one room on one day.

    When `analyzed_table` is "MemberMessage" the gender column is read
    directly; otherwise the distinct user names from `analyzed_table`
    are resolved to genders through the MemberMessage table.
    """

    def __init__(self, spark, db_path, room_id, analyzed_table, analyze_time):
        self.spark = spark                    # active SparkSession
        self.db_path = db_path                # path to the SQLite file
        self.room_id = room_id                # live-room id to filter on
        self.analyzed_table = analyzed_table  # source table name
        self.analyze_time = analyze_time      # day to analyze, "yyyy-MM-dd"

    def _read_filtered(self, table_name):
        # Read `table_name` keeping only this room and the analysis day.
        return read_table(self.spark, self.db_path, table_name).filter(
            (col("room_id") == self.room_id) &
            (date_format(col("create_time"), "yyyy-MM-dd") == self.analyze_time)
        )

    def analyze(self):
        """Return a result dict with integer `male`/`woman` percentages."""
        if self.analyzed_table == "MemberMessage":
            # Gender is available directly on the analyzed table.
            gender_df = self._read_filtered("MemberMessage").select("gender")
        else:
            # Resolve each distinct user of the analyzed table to a gender
            # via MemberMessage.
            user_names = self._read_filtered(self.analyzed_table) \
                .select("user_name").distinct()
            # dropDuplicates prevents join fan-out when a user has several
            # MemberMessage rows, which would inflate every count below.
            member_df = self._read_filtered("MemberMessage") \
                .dropDuplicates(["user_name"])
            gender_df = user_names.join(
                member_df, ["user_name"], "left"
            ).select("gender")

        total = gender_df.count()
        male_count = gender_df.filter(col("gender") == "男").count()
        female_count = gender_df.filter(col("gender") == "女").count()

        # Percentages are truncated to ints; users with unknown gender count
        # toward the total, so male + woman may be below 100.
        return {
            "create_time": now_time(),
            "room_id": self.room_id,
            "analyzed_table": self.analyzed_table,
            "analyze_time": self.analyze_time,
            "male": int((male_count / total) * 100) if total > 0 else 0,
            "woman": int((female_count / total) * 100) if total > 0 else 0
        }


class WaterArmyAnalysis:
    """Rank the most active viewers of a room (candidate "water army").

    For every user that entered the room (MemberMessage) on the analysis
    day, sums how often they entered, chatted (ChatMessage) and liked
    (LikeMessage), and returns the top 3000 by combined activity.
    """

    def __init__(self, spark, db_path, room_id, analyze_time):
        self.spark = spark          # active SparkSession
        self.db_path = db_path      # path to the SQLite file
        self.room_id = room_id      # live-room id to filter on
        self.analyze_time = analyze_time  # day to analyze, "yyyy-MM-dd"

    def _read_filtered(self, table_name):
        # Read `table_name` keeping only this room and the analysis day.
        return read_table(self.spark, self.db_path, table_name).filter(
            (col("room_id") == self.room_id) &
            (date_format(col("create_time"), "yyyy-MM-dd") == self.analyze_time)
        )

    def analyze(self):
        """Return a DataFrame of the 3000 most active users with per-type counts."""
        member_df = self._read_filtered("MemberMessage")

        # Pre-aggregate each activity table per user BEFORE joining.
        # Joining the raw tables and counting afterwards (the previous
        # approach) produced one row per (chat row x like row) pair, so
        # chat_count and like_count were multiplied by each other.
        enter_counts = member_df.groupBy("user_id", "user_name", "room_id") \
            .agg(count("id").alias("enter_count"))
        chat_counts = self._read_filtered("ChatMessage") \
            .groupBy("user_name", "room_id") \
            .agg(count("id").alias("chat_count"))
        like_counts = self._read_filtered("LikeMessage") \
            .groupBy("user_name", "room_id") \
            .agg(count("id").alias("like_count"))

        result = enter_counts.join(
            chat_counts, ["user_name", "room_id"], "left"
        ).join(
            like_counts, ["user_name", "room_id"], "left"
        ).na.fill(
            # Users with no chats/likes get 0, matching count() over an
            # unmatched left join in the original query.
            0, ["chat_count", "like_count"]
        ).select(
            "user_id", "user_name", "room_id",
            "chat_count", "like_count", "enter_count"
        ).withColumn(
            "all_count",
            col("chat_count") + col("like_count") + col("enter_count")
        ).orderBy(desc("all_count")).limit(3000)

        # Tag every row with the analysis day as a plain literal column
        # (replaces the always-true when(all_count >= 0, ...) trick).
        return result.withColumn("analyze_time", lit(self.analyze_time))


def save_to_db(df, db_path, table_name, mode="append"):
    """Write `df` into `table_name` of the SQLite DB at `db_path` via JDBC.

    `mode` is passed straight to Spark: "append" or "overwrite".
    """
    writer = df.write.mode(mode).format("jdbc")
    writer = writer.option("url", f"jdbc:sqlite:{db_path}")
    writer = writer.option("driver", "org.sqlite.JDBC")
    writer = writer.option("dbtable", table_name)
    writer.save()


def run_analysis(room_id, analyze_time, analyzed_table, analysis_type="frequency_analysis"):
    """Run one analysis pass and persist the result to the analyze DB.

    Args:
        room_id: live-room id to analyze.
        analyze_time: day to analyze, formatted "yyyy-MM-dd".
        analyzed_table: source table (used by frequency/gender analyses).
        analysis_type: one of "frequency_analysis", "gender_analysis",
            "water_army_analysis"; anything else is a no-op.
    """
    spark = create_spark_session()
    db_path = "/home/hadoop/SparkDouyin/Douyin.db"
    analyze_db_path = "/home/hadoop/SparkDouyin/analyze_data/analyze.db"

    # try/finally guarantees the Spark session is released even when an
    # analysis raises (previously a failure leaked the session).
    try:
        if analysis_type == "frequency_analysis":
            # Hourly message frequency.
            freq_analyzer = FrequencyAnalysis(
                spark, db_path, room_id, analyzed_table, analyze_time)
            freq_result = freq_analyzer.analyze()
            frequency_schema = StructType([
                StructField("create_time", StringType(), True),
                StructField("room_id", StringType(), True),
                StructField("analyzed_table", StringType(), True),
                StructField("analyze_time", StringType(), True),
                StructField("frequency", StringType(), True)
            ])
            freq_df = spark.createDataFrame([freq_result], schema=frequency_schema)
            save_to_db(freq_df, analyze_db_path, "frequency_analysis")

        elif analysis_type == "gender_analysis":
            # Gender breakdown.
            gender_analyzer = GenderAnalysis(
                spark, db_path, room_id, analyzed_table, analyze_time)
            gender_result = gender_analyzer.analyze()
            gender_schema = StructType([
                StructField("create_time", StringType(), True),
                StructField("room_id", StringType(), True),
                StructField("analyzed_table", StringType(), True),
                StructField("analyze_time", StringType(), True),
                StructField("male", IntegerType(), True),
                StructField("woman", IntegerType(), True)
            ])
            gender_df = spark.createDataFrame(
                [gender_result], schema=gender_schema)
            save_to_db(gender_df, analyze_db_path, "gender_analysis")

        elif analysis_type == "water_army_analysis":
            # Water-army (hyperactive viewer) detection.
            water_army_analyzer = WaterArmyAnalysis(
                spark, db_path, room_id, analyze_time)
            water_army_result = water_army_analyzer.analyze()
            save_to_db(water_army_result, analyze_db_path,
                       "water_army_analysis")
    finally:
        spark.stop()


if __name__ == "__main__":
    # Example invocation: hourly gift-message frequency for one room/day.
    run_analysis(
        room_id="80017709309",
        analyze_time="2023-12-05",
        analyzed_table="GiftMessage",
        analysis_type="frequency_analysis",
    )
