# coding:utf8
# 导包
import os
import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import monotonically_increasing_id, count, avg, regexp_extract, max, col, sum, when
from pyspark.sql.functions import desc, asc
from django_redis import get_redis_connection
import paramiko
import time
import sys


def clear_redis_cache():
    """Delete all analysis-result cache entries from the default Redis store.

    Keys ending in ``*`` are treated as glob patterns and expanded via
    ``KEYS`` before deletion; all others are deleted directly.

    Returns:
        bool: True if every delete succeeded, False if any Redis
        operation raised.
    """
    # Cache keys written by the analysis jobs.
    cache_key_list = (
        'city_salary_*',      # per-city salary data
        'city_people_*',      # per-city company-size data
        'average_city',       # average salary per city
        'salary_category',    # salary ranges
        'exp_salary',         # salary by work experience
        'address_sum',        # city distribution
        'people_category',    # company-size distribution
        'salary_top',         # salary TOP10
        'type_salary',        # salary distribution per job type
        'average_type',       # average salary per job type
        'average_experience', # average salary per experience level
        'education_count',    # education-level distribution
        'type_count',         # job-type counts
        'type_max',           # max salary per job type
    )

    try:
        conn = get_redis_connection("default")
        for cache_key in cache_key_list:
            if '*' not in cache_key:
                # Plain key: delete it directly.
                conn.delete(cache_key)
                continue
            # Pattern key: expand the glob and bulk-delete the matches.
            matched = conn.keys(cache_key)
            if matched:
                conn.delete(*matched)
        print("Redis缓存清除完成")
        return True
    except Exception as e:
        print(f"清除Redis缓存失败: {str(e)}")
        return False

def execute_remote_spark_job():
    """Run the full Spark analysis pipeline on the remote cluster node.

    Connects over SSH, writes a generated PySpark script to
    ``/tmp/spark_job`` on the remote host, executes it while streaming
    its stdout/stderr locally, removes the remote working directory,
    and finally clears the local Redis cache of stale analysis results.

    Returns:
        bool: True on success, False if any step failed (the error is
        printed, not re-raised).
    """
    ssh = None
    sftp = None
    try:
        # SSH connection setup.
        # SECURITY: credentials are hard-coded; move them to a config
        # file or environment variables before deploying.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(
            hostname='192.168.88.161',
            port=22,
            username='root',
            password='123456'
        )

        # Create the remote working directory.
        stdin, stdout, stderr = ssh.exec_command('mkdir -p /tmp/spark_job')
        if stderr.read():
            raise Exception("创建临时目录失败")

        sftp = ssh.open_sftp()
        # NOTE: a previous version also uploaded this local script as
        # /tmp/spark_job/spark_job.py, but that copy was never executed
        # and was deleted by the final cleanup, so the upload is omitted.

        # PySpark script executed on the remote server. This string is
        # written verbatim to the remote file; do not reformat it.
        remote_script = '''
import os
import time
from pyspark.sql import SparkSession
from pyspark.sql.functions import monotonically_increasing_id, count, avg, regexp_extract, max, col, sum, when
from pyspark.sql.functions import desc, asc

# 设置环境变量
os.environ["JAVA_HOME"] = "/export/server/jdk1.8.0_241"
os.environ["HADOOP_HOME"] = "/export/server/hadoop"
os.environ["HADOOP_CONF_DIR"] = "/export/server/hadoop/etc/hadoop"
os.environ["SPARK_HOME"] = "/export/server/spark"
os.environ["PYSPARK_PYTHON"] = "/root/anaconda3/bin/python3"
os.environ["PATH"] = f"{os.environ['JAVA_HOME']}/bin:{os.environ['HADOOP_HOME']}/bin:{os.environ.get('PATH', '')}"

# 创建SparkSession
spark = SparkSession.builder\\
    .appName('sparkSQL')\\
    .master('local[*]')\\
    .config("spark.sql.warehouse.dir", "hdfs://192.168.88.161:8020/user/hive/warehouse")\\
    .config("spark.sql.shuffle.partitions", 2)\\
    .config("spark.executor.memory", "2g")\\
    .config("spark.driver.memory", "2g")\\
    .config("spark.jars", "/export/server/spark/jars/mysql-connector-java-5.1.32.jar")\\
    .config("spark.eventLog.enabled", "false")\\
    .config("spark.ui.enabled", "false")\\
    .enableHiveSupport()\\
    .getOrCreate()

# 读取数据表
job_data = spark.read.table('jobData')

# 需求一：城市平均工资前十
print("\\n分析需求一：城市平均工资前十...")
top_cities = job_data.groupby("city").agg(avg("maxSalary").alias("avg_max_salary"))
top_ten_cities = top_cities.orderBy(desc("avg_max_salary"))
top_ten_cities.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "averageCity")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
top_ten_cities.write.mode("overwrite").saveAsTable("averageCity", "parquet")
spark.sql("select * from averageCity").show()

# 需求二：工资区间
print("\\n分析需求二：工资区间...")
salary_classified = job_data.withColumn("salaryCategory",
    when(col("maxSalary").between(0, 5000), "0-5k")
    .when(col("maxSalary").between(5000, 7000), "5-7k")
    .when(col("maxSalary").between(7000, 10000), "7-10k")
    .when(col("maxSalary").between(10000, 20000), "10-20k")
    .when(col("maxSalary") >= 20000, "20k 以上")
    .otherwise("未分类"))
salary_range_result = salary_classified.groupby("salaryCategory").agg(count('*').alias('count'))
salary_range_result.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "salaryCategory")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
salary_range_result.write.mode("overwrite").saveAsTable("salaryCategory", "parquet")
spark.sql("select * from salaryCategory").show()

# 需求三：工作经验分析
print("\\n分析需求三：工作经验分析...")
work_experience_analysis = job_data.groupby(["workExperience","experience_value"]).agg(
    avg("maxSalary").alias("avg_max_salary"),
    avg("minSalary").alias("avg_min_salary"))
work_experience_analysis = work_experience_analysis.orderBy("experience_value")
work_experience_analysis.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "expSalary")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
work_experience_analysis.write.mode("overwrite").saveAsTable("expSalary", "parquet")
spark.sql("select * from expSalary").show()

# 需求四：城市分布
print("\\n分析需求四：城市分布...")
city_distribution_result = job_data.groupby("city").count()
city_distribution_result.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "addressSum")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
city_distribution_result.write.mode("overwrite").saveAsTable("addressSum", "parquet")
spark.sql("select * from addressSum").show()

# 需求五：公司规模分布
print("\\n分析需求五：公司规模分布...")
job_df = job_data.withColumn("people_num", 
    when(col("companyPeople").rlike(r'-'),
         regexp_extract(col('companyPeople'), r'(\d+)-(\d+)', 1).cast("int"))
    .otherwise(col('companyPeople').cast("int")))
people_classified = job_df.withColumn("people_category",
    when(col("people_num").between(0, 19), "0-19")
    .when(col("people_num").between(20, 99), "20-99")
    .when(col("people_num").between(100, 499), "100-499")
    .when(col("people_num").between(500, 999), "500-999")
    .when(col("people_num").between(1000, 9999), "1000-9999")
    .when(col("people_num") >= 10000, "above_10000")
    .otherwise("未分类"))
people_range_result = people_classified.groupby("people_category").agg(count('*').alias('count'))
people_range_result.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "peopleCategory")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
people_range_result.write.mode("overwrite").saveAsTable("peopleCategory", "parquet")
spark.sql("select * from peopleCategory").show()

# top10
print("\\n分析需求：薪资Top10...")
top_10_salary = job_data.orderBy(col("maxSalary").desc()).limit(10)
top_10_salary.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "salaryTop")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
top_10_salary.write.mode("overwrite").saveAsTable("salaryTop", "parquet")
spark.sql("select * from salaryTop").show()

# 需求六：薪资分析 职业薪资
print("\\n分析需求六：职业薪资分析...")
industry_salary_result = job_data.groupBy("type").agg(
    sum(when(col("maxSalary") <= 5000, 1).otherwise(0)).alias("0-5k"),
    sum(when((col("maxSalary") > 5000) & (col("maxSalary") <= 7000), 1).otherwise(0)).alias("5-7k"),
    sum(when((col("maxSalary") > 7000) & (col("maxSalary") <= 10000), 1).otherwise(0)).alias("7-10k"),
    sum(when((col("maxSalary") > 10000) & (col("maxSalary") <= 20000), 1).otherwise(0)).alias("10-20k"),
    sum(when(col("maxSalary") > 20000, 1).otherwise(0)).alias("above_20k"))
industry_salary_result.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8mb4")\\
    .option("dbtable", "typeSalary")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
industry_salary_result.write.mode("overwrite").saveAsTable("typeSalary", "parquet")
spark.sql("select * from typeSalary").show()

# 需求七：职业平均薪资
print("\\n分析需求七：职业平均薪资...")
industry_average_salary = job_data.groupBy("type").agg(avg(col("maxSalary")).alias("avg_max_salary"))
industry_average_salary.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "averageType")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
industry_average_salary.write.mode("overwrite").saveAsTable("averageType", "parquet")
spark.sql("select * from averageType").show()

# 需求八：经验平均薪资和个数
print("\\n分析需求八：经验平均薪资和个数...")
experience_avg_salary = job_data.groupby("workExperience").agg(
    avg(col("maxSalary")).alias("avg_max_salary"),
    count('*').alias("count"))
experience_avg_salary.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "averageExperience")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
experience_avg_salary.write.mode("overwrite").saveAsTable("averageExperience", "parquet")
spark.sql("select * from averageExperience").show()

# 需求九：学历
print("\\n分析需求九：学历分布...")
educational_result = job_data.groupBy("educational").agg(count("*").alias("count"))
educational_result.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "educationCount")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
educational_result.write.mode("overwrite").saveAsTable("educationCount", "parquet")
spark.sql("select * from educationCount").show()

# 需求十：职业个数
print("\\n分析需求十：职业数量统计...")
industry_num_result = job_data.groupBy("type").agg(count("*").alias("count"))
industry_num_result.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "typeCount")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
industry_num_result.write.mode("overwrite").saveAsTable("typeCount", "parquet")
spark.sql("select * from typeCount").show()

# 需求十一：各类型最大值
print("\\n分析需求十一：职业最高薪资...")
industry_max_result = job_data.groupBy("type").agg(max(col("maxSalary")).alias("max_salary"))
industry_max_result.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "typeMax")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
industry_max_result.write.mode("overwrite").saveAsTable("typeMax", "parquet")
spark.sql("select * from typeMax").show()

# 需求十二：各城市薪资情况
print("\\n分析需求十二：城市薪资分布...")
conditions = [
    (col("maxSalary") <= 5000, '0-5k'),
    ((col("maxSalary") > 5000) & (col("maxSalary") <= 7000), '5-7k'),
    ((col("maxSalary") > 7000) & (col("maxSalary") <= 10000), '7-10k'),
    ((col("maxSalary") > 10000) & (col("maxSalary") <= 20000), '10-20k'),
    (col("maxSalary") > 20000, 'above_20k')
]
city_salary_situation = job_data.groupBy("city").agg(
    *[count(when(condition, 1)).alias(range_name) for condition, range_name in conditions])
city_salary_situation.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "citySalary")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8").save()
city_salary_situation.write.mode("overwrite").saveAsTable("citySalary", "parquet")
spark.sql("select * from citySalary").show()

# 需求十三：各城市公司规模分布
print("\\n分析需求十三：城市公司规模分布...")
conditions1 = [
    (col("people_num") < 20, '0-19'),
    ((col("people_num") >= 20) & (col("people_num") < 100), '20-99'),
    ((col("people_num") >= 100) & (col("people_num") < 500), '100-499'),
    ((col("people_num") >= 500) & (col("people_num") < 1000), '500-999'),
    ((col("people_num") >= 1000) & (col("people_num") < 10000), '1000-9999'),
    (col("people_num") >= 10000, 'above_10000')
]
city_people_result = job_df.groupBy("city").agg(
    *[count(when(condition, 1)).alias(range_name) for condition, range_name in conditions1])
city_people_result.write.format("jdbc")\\
    .mode("overwrite")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "cityPeople")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8")\\
    .save()
city_people_result.write.mode("overwrite").saveAsTable("cityPeople", "parquet")
spark.sql("select * from cityPeople").show()

print("\\n所有分析任务完成！")
spark.stop()
'''

        # Write the generated script to the remote working directory.
        with sftp.open('/tmp/spark_job/remote_spark_job.py', 'w') as f:
            f.write(remote_script)

        # Shell command: export the Hadoop/Spark environment, then run
        # the script (all in one shell session so the exports apply).
        command = '''
export JAVA_HOME=/export/server/jdk1.8.0_241 && \\
export HADOOP_HOME=/export/server/hadoop && \\
export HADOOP_CONF_DIR=/export/server/hadoop/etc/hadoop && \\
export SPARK_HOME=/export/server/spark && \\
export PYSPARK_PYTHON=/root/anaconda3/bin/python3 && \\
export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin:$PATH && \\
cd /tmp/spark_job && \\
python3 remote_spark_job.py
'''

        # Execute and stream output while the remote process runs.
        stdin, stdout, stderr = ssh.exec_command(command)
        while not stdout.channel.exit_status_ready():
            if stdout.channel.recv_ready():
                print(stdout.channel.recv(1024).decode(), end='')
            if stderr.channel.recv_stderr_ready():
                print(stderr.channel.recv_stderr(1024).decode(), end='')
            time.sleep(0.1)

        # Drain anything left in the buffers after the process exited.
        remaining_stdout = stdout.read().decode()
        remaining_stderr = stderr.read().decode()
        if remaining_stdout:
            print(remaining_stdout)
        if remaining_stderr:
            print(remaining_stderr)

        # Fail loudly on a non-zero remote exit status.
        exit_status = stdout.channel.recv_exit_status()
        if exit_status != 0:
            raise Exception(f"远程命令执行失败，退出状态码：{exit_status}")

        # Remove the remote working directory.
        ssh.exec_command('rm -rf /tmp/spark_job')

        # Invalidate the local Redis cache so fresh results are served.
        print("\n开始清理Redis缓存...")
        if not clear_redis_cache():
            print("警告：Redis缓存清理失败，但不影响分析结果")

        return True
    except Exception as e:
        print(f"远程执行失败: {str(e)}")
        return False
    finally:
        # Always release SSH resources, even when a step above raised
        # (the original code leaked both on any failure path).
        if sftp is not None:
            try:
                sftp.close()
            except Exception:
                pass
        if ssh is not None:
            ssh.close()

if __name__ == '__main__':
    # Script entry point: run the remote Spark job and propagate any
    # failure to the shell as a non-zero exit code.
    try:
        succeeded = execute_remote_spark_job()
        if not succeeded:
            raise Exception("Spark作业执行失败")
    except Exception as error:
        print(f"发生错误: {str(error)}")
        sys.exit(1)

def setup_remote_spark():
    """Prepare the remote host's Spark/Hive environment over SSH.

    Checks whether the Hive Metastore (a ``RunJar`` process) is running
    on the remote node and starts it if not.

    Note: each ``ssh.exec_command`` call runs in its own shell session,
    so environment variables exported in one call do NOT persist into
    the next. The original implementation issued six separate ``export``
    commands that therefore had no effect; the exports are now chained
    into the same shell session as the commands that need them.

    Returns:
        bool: True on success, False if the SSH session failed.
    """
    ssh = None
    try:
        # SSH connection setup.
        # SECURITY: credentials are hard-coded; move them to a config
        # file or environment variables before deploying.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(
            hostname='192.168.88.161',
            port=22,
            username='root',
            password='123456'
        )

        # Environment required by the Hive/Hadoop tooling; prefixed onto
        # every remote command so it takes effect in that session.
        env_prefix = ' && '.join([
            'export JAVA_HOME=/export/server/jdk1.8.0_241/',
            'export PYSPARK_PYTHON=/root/anaconda3/bin/python3',
            'export HADOOP_CONF_DIR=/export/server/hadoop/etc/hadoop/',
            'export HIVE_HOME=/export/server/hive',
            'export PATH=$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH',
            'export HIVE_CONF_DIR=/export/server/hive/conf',
        ])

        # Check whether the Hive Metastore service is already running.
        stdin, stdout, stderr = ssh.exec_command(f'{env_prefix} && jps | grep RunJar')
        if not stdout.read().decode():
            print("Hive Metastore服务未运行，尝试启动...")
            # Launch the metastore in the background with the same env.
            ssh.exec_command(
                f'{env_prefix} && nohup hive --service metastore > /dev/null 2>&1 &')
            time.sleep(5)  # give the service a moment to come up

        return True
    except Exception as e:
        print(f"SSH连接失败: {str(e)}")
        return False
    finally:
        # Release the SSH connection even when setup fails.
        if ssh is not None:
            ssh.close()


