import json
import os
import re

import pandas as pd
import pymysql
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, avg, desc, regexp_extract, when, split, explode, trim

# Create the SparkSession: local mode on all cores, 4 GB driver memory.
# NOTE(review): print(1)/print(2) look like leftover debug progress markers.
print(1)
spark = SparkSession.builder.master("local[*]") \
    .appName("PySpark Demo") \
    .config("spark.driver.memory", "4g") \
    .getOrCreate()

print(2)
# Read the raw job-postings CSV; header=True takes column names from row 1.
# NOTE(review): no schema / inferSchema is given, so every column is a string.
print("正在读取数据...")
df = spark.read.csv("1.csv", header=True, encoding="utf-8")

# Data cleaning
print("正在清洗数据...")
# Drop rows that are missing any of: job title, salary, or company name.
df = df.na.drop(subset=["岗位名称", "岗位薪资", "企业名称"])


# Parse the raw salary column
def extract_salary(salary_str):
    """Parse a salary-range string and return its midpoint in yuan, or None.

    Handles the common listing formats: '10k-20k', '10-20k', '1w-1.5w',
    '8000-12000' (case-insensitive; trailing text such as '·13薪' is ignored).
    When only the upper bound carries a unit suffix (e.g. '10-20k'), the same
    unit is applied to the lower bound — the previous implementation replaced
    'k'/'w' with digits *before* splitting, which left its unit-detection
    branches dead and averaged 10 with 20000 for '10-20k'.

    Returns None for missing values and unparseable strings (e.g. '面议').
    """
    if not salary_str or pd.isna(salary_str):
        return None

    # One regex pass: number, optional unit, dash, number, optional unit.
    # re.search (not fullmatch) tolerates suffixes like '·13薪'.
    match = re.search(
        r'(\d+(?:\.\d+)?)\s*([kw]?)\s*-\s*(\d+(?:\.\d+)?)\s*([kw]?)',
        str(salary_str).lower(),
    )
    if match is None:
        return None

    low, low_unit, high, high_unit = match.groups()
    multiplier = {'': 1.0, 'k': 1000.0, 'w': 10000.0}
    # A bare lower bound inherits the upper bound's unit: '10-20k' -> 10k-20k.
    low_value = float(low) * multiplier[low_unit or high_unit]
    high_value = float(high) * multiplier[high_unit]
    return (low_value + high_value) / 2.0


# Register the salary parser as a Spark UDF returning FloatType
from pyspark.sql.functions import udf
from pyspark.sql.types import FloatType

extract_salary_udf = udf(extract_salary, FloatType())

# Derive the midpoint-salary column from the raw salary strings
df = df.withColumn("平均薪资", extract_salary_udf(col("岗位薪资")))

# Split the comma-separated skill-requirements string into an array column.
# NOTE(review): splits on the ASCII comma only; full-width '，' is not handled
# — confirm against the CSV's actual delimiter.
df = df.withColumn("技能列表", split(col("岗位技能需求"), ","))

print("开始进行数据分析...")

# Analysis 1: posting volume per top-level job category, most in demand first
job_category_demand = df.groupBy("岗位一级分类").count().orderBy(desc("count"))
print("分析1完成：不同岗位类别的需求量")

# Analysis 2: posting volume per province, most active first
region_demand = df.groupBy("工作省份").count().orderBy(desc("count"))
print("分析2完成：不同地区的招聘情况")

# Analysis 3: mean midpoint salary and posting count per job category.
# avg() ignores NULLs, so unparseable salaries don't skew the mean.
salary_analysis = df.groupBy("岗位一级分类").agg(
    avg("平均薪资").alias("平均薪资"),
    count("*").alias("岗位数量")
).orderBy(desc("平均薪资"))
print("分析3完成：薪资水平分析")

# Analysis 4: posting volume per experience requirement
experience_analysis = df.groupBy("经验要求").count().orderBy(desc("count"))
print("分析4完成：经验要求分析")

# Analysis 5: posting volume per education requirement
education_analysis = df.groupBy("学历要求").count().orderBy(desc("count"))
print("分析5完成：学历要求分析")

# Analysis 6: skill frequency across all postings.
# Explode the skill array to one row per skill, trim whitespace,
# then count; empty-string skills are filtered out after grouping.
skills_df = df.select(explode(col("技能列表")).alias("技能"))
skills_df = skills_df.withColumn("技能", trim(col("技能")))
skills_analysis = skills_df.groupBy("技能").count().orderBy(desc("count")).filter(col("技能") != "")
print("分析6完成：技能需求分析")

# Collect results to pandas DataFrames for the MySQL/JSON export steps.
# These globals are consumed by insert_data_to_mysql/save_results_as_json.
job_category_demand_pd = job_category_demand.toPandas()
region_demand_pd = region_demand.toPandas()
salary_analysis_pd = salary_analysis.toPandas()
experience_analysis_pd = experience_analysis.toPandas()
education_analysis_pd = education_analysis.toPandas()
skills_analysis_pd = skills_analysis.limit(50).toPandas()  # keep the top 50 skills


# Connect to MySQL and create the database and result tables
def create_mysql_tables():
    """Create the analysis database and its six result tables.

    Idempotent: uses CREATE ... IF NOT EXISTS throughout. The connection is
    now released via try/finally — previously a failed DDL statement leaked
    the connection. No parameters; raises pymysql errors to the caller.
    """
    print("正在连接MySQL并创建数据库和表...")

    # table name -> column definitions appended after the surrogate key
    table_columns = {
        "job_category_demand": "category VARCHAR(50), demand INT",
        "region_demand": "region VARCHAR(50), demand INT",
        "salary_analysis": "category VARCHAR(50), avg_salary FLOAT, job_count INT",
        "experience_analysis": "experience VARCHAR(50), demand INT",
        "education_analysis": "education VARCHAR(50), demand INT",
        "skills_analysis": "skill VARCHAR(50), demand INT",
    }

    conn = pymysql.connect(
        host='localhost',
        user='root',
        password='password',  # TODO(review): move credentials out of source
        charset='utf8mb4'
    )
    try:
        with conn.cursor() as cursor:
            cursor.execute(
                "CREATE DATABASE IF NOT EXISTS it_recruitment_analysis "
                "CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
            cursor.execute("USE it_recruitment_analysis")

            # All six tables share the same shape: auto-increment id + columns.
            for name, columns in table_columns.items():
                cursor.execute(f"""
                CREATE TABLE IF NOT EXISTS {name} (
                    id INT AUTO_INCREMENT PRIMARY KEY,
                    {columns}
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
                """)
        conn.commit()
    finally:
        conn.close()

    print("数据库和表创建完成")


# Insert the analysis results into MySQL
def insert_data_to_mysql():
    """Refresh the six analysis tables from the module-level pandas frames.

    Tables are truncated first so repeated runs don't accumulate rows. The
    connection is closed via try/finally even when an insert fails (it was
    previously leaked on error). Batched with executemany instead of one
    round-trip per row. Raises pymysql errors to the caller.
    """
    print("正在将分析结果插入MySQL...")

    conn = pymysql.connect(
        host='localhost',
        user='root',
        password='password',  # TODO(review): move credentials out of source
        db='it_recruitment_analysis',
        charset='utf8mb4'
    )
    try:
        with conn.cursor() as cursor:
            # Start from empty tables so the load is idempotent.
            for table in ("job_category_demand", "region_demand",
                          "salary_analysis", "experience_analysis",
                          "education_analysis", "skills_analysis"):
                cursor.execute(f"TRUNCATE TABLE {table}")

            # 1. Job-category demand
            cursor.executemany(
                "INSERT INTO job_category_demand (category, demand) VALUES (%s, %s)",
                [(row['岗位一级分类'], int(row['count']))
                 for _, row in job_category_demand_pd.iterrows()],
            )

            # 2. Regional demand
            cursor.executemany(
                "INSERT INTO region_demand (region, demand) VALUES (%s, %s)",
                [(row['工作省份'], int(row['count']))
                 for _, row in region_demand_pd.iterrows()],
            )

            # 3. Salary levels. Categories with no parseable salary come back
            # as NaN from Spark; NaN is truthy, so the old `if row[...]` test
            # let NaN through to MySQL — map it to 0 explicitly.
            cursor.executemany(
                "INSERT INTO salary_analysis (category, avg_salary, job_count) VALUES (%s, %s, %s)",
                [(row['岗位一级分类'],
                  float(row['平均薪资']) if pd.notna(row['平均薪资']) else 0,
                  int(row['岗位数量']))
                 for _, row in salary_analysis_pd.iterrows()],
            )

            # 4. Experience requirements
            cursor.executemany(
                "INSERT INTO experience_analysis (experience, demand) VALUES (%s, %s)",
                [(row['经验要求'], int(row['count']))
                 for _, row in experience_analysis_pd.iterrows()],
            )

            # 5. Education requirements
            cursor.executemany(
                "INSERT INTO education_analysis (education, demand) VALUES (%s, %s)",
                [(row['学历要求'], int(row['count']))
                 for _, row in education_analysis_pd.iterrows()],
            )

            # 6. Skill demand (top-50 frame prepared above)
            cursor.executemany(
                "INSERT INTO skills_analysis (skill, demand) VALUES (%s, %s)",
                [(row['技能'], int(row['count']))
                 for _, row in skills_analysis_pd.iterrows()],
            )
        conn.commit()
    finally:
        conn.close()

    print("数据插入完成")


# Save the analysis results as JSON for the Web visualization layer
def save_results_as_json():
    """Dump each analysis DataFrame to static/data/<name>.json.

    Files are records-oriented JSON with non-ASCII (Chinese) text preserved
    (force_ascii=False). Reads the module-level pandas frames; overwrites
    any existing files.
    """
    print("正在保存分析结果为JSON格式...")

    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs('static/data', exist_ok=True)

    # output filename -> DataFrame to serialize
    outputs = {
        'job_category_demand.json': job_category_demand_pd,
        'region_demand.json': region_demand_pd,
        'salary_analysis.json': salary_analysis_pd,
        'experience_analysis.json': experience_analysis_pd,
        'education_analysis.json': education_analysis_pd,
        'skills_analysis.json': skills_analysis_pd,
    }
    for filename, frame in outputs.items():
        frame.to_json(f'static/data/{filename}', orient='records', force_ascii=False)

    print("JSON文件保存完成")


if __name__ == "__main__":
    try:
        # Create the MySQL database and tables (idempotent DDL)
        create_mysql_tables()

        # Load the analysis results into MySQL
        insert_data_to_mysql()

        # Persist the results as JSON for the web front-end
        save_results_as_json()

        print("数据分析和存储完成！")
    except Exception as e:
        # Top-level boundary: report the failure and fall through to cleanup.
        print(f"发生错误: {e}")
    finally:
        # Always release Spark resources, success or failure.
        spark.stop()