# coding:utf8
import paramiko
import time
import sys
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import monotonically_increasing_id, col, when, lit, coalesce, udf, row_number
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
from pyspark.sql.window import Window
import re

def execute_remote_spark_job():
    """Execute a PySpark ETL job on a remote server over SSH.

    Connects to the remote host, writes a self-contained PySpark script into
    /tmp/spark_job, runs it under the required Hadoop/Spark environment
    variables, streams the job's stdout/stderr back to the local console,
    and removes the remote temp directory on success.

    Returns:
        bool: True when the remote command exits with status 0; False on any
        failure (connection, upload, or a non-zero remote exit status).
    """
    ssh = None
    sftp = None
    try:
        # SSH connection setup.
        # SECURITY(review): host, user and password are hard-coded; move
        # them to configuration or environment variables.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(
            hostname='192.168.88.161',
            port=22,
            username='root',
            password='123456'
        )
        
        # Create the remote working directory. `mkdir -p` only writes to
        # stderr on a real failure, so any stderr output means the step failed.
        stdin, stdout, stderr = ssh.exec_command('mkdir -p /tmp/spark_job')
        if stderr.read():
            raise Exception("创建临时目录失败")
        
        # Upload this local script to the remote server.
        # NOTE(review): the uploaded copy (spark_job.py) is never executed
        # below — only the generated remote_spark_job.py runs. Confirm whether
        # this upload is still needed.
        sftp = ssh.open_sftp()
        local_path = __file__
        remote_path = '/tmp/spark_job/spark_job.py'
        sftp.put(local_path, remote_path)
        
        # Self-contained PySpark script executed on the remote host. The
        # doubled backslashes below become single line-continuation
        # backslashes in the file written remotely.
        remote_script = '''
import os
import time
from pyspark.sql import SparkSession
from pyspark.sql.functions import monotonically_increasing_id, col, when, lit, coalesce, udf, row_number
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
from pyspark.sql.window import Window
import re

def remove_trailing_slashes(text):
    if isinstance(text, str):
        return re.sub(r"^(\s*/\s*)+|(\s*/\s*)+$", "", text)
    return text

# 设置环境变量
os.environ["JAVA_HOME"] = "/export/server/jdk1.8.0_241"
os.environ["HADOOP_HOME"] = "/export/server/hadoop"
os.environ["HADOOP_CONF_DIR"] = "/export/server/hadoop/etc/hadoop"
os.environ["SPARK_HOME"] = "/export/server/spark"
os.environ["PYSPARK_PYTHON"] = "/root/anaconda3/bin/python3"
os.environ["PATH"] = f"{os.environ['JAVA_HOME']}/bin:{os.environ['HADOOP_HOME']}/bin:{os.environ.get('PATH', '')}"

# 退出HDFS安全模式并创建必要的目录
print("正在退出HDFS安全模式...")
os.system(f"{os.environ['HADOOP_HOME']}/bin/hdfs dfsadmin -safemode leave")
time.sleep(5)  # 等待安全模式退出


# 创建SparkSession
spark = SparkSession.builder\\
    .appName('sparkSQL')\\
    .master('local[*]')\\
    .config("spark.sql.warehouse.dir", "hdfs://192.168.88.161:8020/user/hive/warehouse")\\
    .config("spark.sql.shuffle.partitions", 2)\\
    .config("spark.executor.memory", "2g")\\
    .config("spark.driver.memory", "2g")\\
    .config("spark.jars", "/export/server/spark/jars/mysql-connector-java-5.1.32.jar")\\
    .config("spark.eventLog.enabled", "false")\\
    .config("spark.ui.enabled", "false")\\
    .enableHiveSupport()\\
    .getOrCreate()

# 构建数据结构模式
schema = StructType()\\
    .add("type", StringType(), True)\\
    .add("title", StringType(), True)\\
    .add("companyTitle", StringType(), True)\\
    .add("minSalary", IntegerType(), True)\\
    .add("maxSalary", IntegerType(), True)\\
    .add("workExperience", StringType(), True)\\
    .add("educational", StringType(), True)\\
    .add("detailUrl", StringType(), True)\\
    .add("companyPeople", StringType(), True)\\
    .add("workTag", StringType(), True)\\
    .add("welfare", StringType(), True)\\
    .add("imgSrc", StringType(), True)\\
    .add("city", StringType(), True)

# 读取CSV文件
df = spark.read.format("csv")\\
    .option("sep", ",")\\
    .option("quote", '"')\\
    .option("escape", '"')\\
    .option("header", True)\\
    .option("encoding", "utf-8")\\
    .schema(schema)\\
    .load("hdfs://192.168.88.161:8020/SparkAnalyzeRecruitmentSolution/jobData.csv")

# 数据处理
new_df = df.drop_duplicates(subset=['title', 'companyTitle', 'maxSalary', 'minSalary'])
new_df.show()
print("new_df 的行数为：", new_df.count())

# 使用row_number()生成自增长的唯一ID
window = Window.orderBy("title", "companyTitle", "maxSalary", "minSalary")
new_df = new_df.withColumn('id', row_number().over(window).cast(IntegerType()))

new_df = new_df.withColumn("workExperience",
    when(col("workExperience") == "应届生", "在校/应届").otherwise(col("workExperience")))

# 工作经验映射
experience_mapping = {
    "经验不限": 1, "在校/应届": 2, "1个月": 3, "2个月": 4, "3个月": 5,
    "4个月": 6, "5个月": 7, "6个月": 8, "7个月": 9, "8个月": 10,
    "9个月": 11, "10个月": 12, "11个月": 13, "12个月": 14, "1年以内": 15,
    "1-3年": 16, "3-5年": 17, "5-10年": 18, "10年以上": 19
}

new_df = new_df.withColumn("experience_value",
    when(col("workExperience").isin(list(experience_mapping.keys())), col("workExperience")))
for key, value in experience_mapping.items():
    new_df = new_df.withColumn("experience_value",
        when(col("workExperience") == key, value).otherwise(col("experience_value")))

new_df = new_df.withColumn("experience_value", col("experience_value").cast(IntegerType()))

# 处理薪资
new_df = new_df.withColumn("minSalary",
    when(col("minSalary") <= 1000, col("minSalary") * 26).otherwise(col("minSalary")))
new_df = new_df.withColumn("maxSalary",
    when(col("maxSalary") <= 1000, col("maxSalary") * 26).otherwise(col("maxSalary")))

# 处理缺失值
new_df = new_df.withColumn("workExperience", coalesce(col("workExperience"), lit("经验不限")))
new_df = new_df.withColumn("educational", coalesce(col("educational"), lit("学历不限")))
new_df = new_df.withColumn("workTag", coalesce(col("workTag"), lit("")))

# 注册UDF并处理workTag
remove_trailing_slashes_udf = udf(remove_trailing_slashes, StringType())
new_df = new_df.withColumn("workTag", remove_trailing_slashes_udf(new_df["workTag"]))

# 保存到MySQL
print("\\n正在保存数据到MySQL...")
new_df.write.mode("overwrite").format("jdbc")\\
    .option("url", "jdbc:mysql://192.168.88.161:3306/bigdata?useSSL=false&useUnicode=true&charset=utf8")\\
    .option("dbtable", "jobData")\\
    .option("user", "root")\\
    .option("password", "123456")\\
    .option("encoding", "utf-8")\\
    .save()
print("数据已成功保存到MySQL")

# 保存到Hive
print("\\n正在保存数据到Hive...")
new_df.write.mode("overwrite").saveAsTable("jobData", "parquet")
print("数据已成功保存到Hive")

# 关闭SparkSession
spark.stop()
'''
        
        # Write the generated PySpark script to the remote host.
        with sftp.open('/tmp/spark_job/remote_spark_job.py', 'w') as f:
            f.write(remote_script)
        
        # Shell command: export the Hadoop/Spark environment, then run the
        # generated script with the remote python3.
        command = '''
export JAVA_HOME=/export/server/jdk1.8.0_241 && \\
export HADOOP_HOME=/export/server/hadoop && \\
export HADOOP_CONF_DIR=/export/server/hadoop/etc/hadoop && \\
export SPARK_HOME=/export/server/spark && \\
export PYSPARK_PYTHON=/root/anaconda3/bin/python3 && \\
export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin:$PATH && \\
cd /tmp/spark_job && \\
python3 remote_spark_job.py
'''
        
        # Run the job.
        stdin, stdout, stderr = ssh.exec_command(command)
        
        # Stream stdout/stderr to the local console while the command runs.
        while not stdout.channel.exit_status_ready():
            if stdout.channel.recv_ready():
                print(stdout.channel.recv(1024).decode(), end='')
            if stderr.channel.recv_stderr_ready():
                print(stderr.channel.recv_stderr(1024).decode(), end='')
            time.sleep(0.1)
        
        # Drain whatever output remained buffered after the process exited.
        remaining_stdout = stdout.read().decode()
        remaining_stderr = stderr.read().decode()
        if remaining_stdout:
            print(remaining_stdout)
        if remaining_stderr:
            print(remaining_stderr)
            
        # Fail loudly on a non-zero remote exit status.
        exit_status = stdout.channel.recv_exit_status()
        if exit_status != 0:
            raise Exception(f"远程命令执行失败，退出状态码：{exit_status}")
        
        # Best-effort cleanup of the remote temp directory (result ignored).
        ssh.exec_command('rm -rf /tmp/spark_job')
        
        return True
    except Exception as e:
        print(f"远程执行失败: {str(e)}")
        return False
    finally:
        # Always release the SFTP session and SSH connection — the original
        # code only closed them on the success path, leaking both whenever
        # an exception was raised after connect().
        if sftp is not None:
            sftp.close()
        if ssh is not None:
            ssh.close()

if __name__ == '__main__':
    # Entry point: run the remote Spark job and exit non-zero on any failure.
    try:
        job_succeeded = execute_remote_spark_job()
        if not job_succeeded:
            raise Exception("Spark作业执行失败")
    except Exception as e:
        print(f"发生错误: {str(e)}")
        sys.exit(1)