from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
import config
import pandas as pd
import os
import sys
from liner_model import SalaryModel

# Resolve the path of the current Python interpreter so that Spark workers
# run the exact same interpreter as the driver (avoids version mismatches).
python_path = sys.executable

# Point both the PySpark workers and the driver at this interpreter.
os.environ['PYSPARK_PYTHON'] = python_path
os.environ['PYSPARK_DRIVER_PYTHON'] = python_path


class SparkProcessor:
    def __init__(self):
        """Create the Spark session, load the job data from HDFS and try to
        load the pre-trained salary model.

        NOTE: the constructor performs network/disk I/O (HDFS read, model
        load) as a side effect.
        """
        # Adaptive query execution enabled; worker/driver Python pinned to
        # the interpreter resolved at module import time (see top of file).
        self.spark = SparkSession.builder \
            .appName(config.SPARK_APP_NAME) \
            .master(config.SPARK_MASTER) \
            .config("spark.sql.adaptive.enabled", "true") \
            .config("spark.sql.adaptive.coalescePartitions.enabled", "true") \
            .config("spark.pyspark.python", python_path) \
            .config("spark.pyspark.driver.python", python_path) \
            .getOrCreate()

        # self.df: job-postings DataFrame, populated by load_data().
        self.df = None
        self.salary_model = SalaryModel(self.spark)
        # self.model_loaded: flipped to True once a model is successfully loaded.
        self.model_loaded = False
        self.load_data()
        self.try_load_model()

    def try_load_model(self):
        """Attempt to load the pre-trained salary model from the default path.

        Sets ``self.model_loaded`` to reflect the outcome; never raises.
        """
        path = "./model/salary_model"
        try:
            if not os.path.exists(path):
                # Nothing on disk yet — stay in the "not loaded" state.
                print("未找到预训练模型，请先运行 train_model.py 训练模型")
                self.model_loaded = False
                return
            self.salary_model.load_model(path)
            self.model_loaded = True
            print("预训练模型加载成功")
        except Exception as e:
            print(f"加载预训练模型失败: {e}")
            self.model_loaded = False

    def load_data(self):
        """Load the job-posting CSVs from HDFS into ``self.df``.

        Reads every CSV under ``/zhipin`` with explicit UTF-8 decoding,
        normalizes the (possibly garbled) header names to English via
        ``_detect_and_map_columns``, and guarantees the final frame contains
        exactly the expected columns. Falls back to sample data on failure.
        """
        try:
            # Glob path so every CSV in the HDFS folder is read at once.
            hdfs_path = config.HDFS_URL + "/zhipin/*.csv"
            print(f"尝试从HDFS加载数据: {hdfs_path}")

            # Explicit encoding; multiLine/quote/escape handle fields that
            # contain embedded newlines and doubled quotes.
            temp_df = self.spark.read \
                .option("header", "true") \
                .option("inferSchema", "true") \
                .option("encoding", "UTF-8") \
                .option("multiLine", "true") \
                .option("quote", "\"") \
                .option("escape", "\"") \
                .csv(hdfs_path)

            print(f"初始数据记录数: {temp_df.count()}")
            print("原始数据列名:", temp_df.columns)

            # Auto-detect the header-to-English mapping (handles mojibake).
            column_mapping = self._detect_and_map_columns(temp_df)

            # Rename the detected columns to their canonical English names.
            for original_col, english_col in column_mapping.items():
                if original_col in temp_df.columns:
                    temp_df = temp_df.withColumnRenamed(original_col, english_col)
                    print(f"重命名列: {original_col} -> {english_col}")

            # Ensure every expected column exists; missing ones are created
            # as string-typed nulls so downstream code can rely on the schema.
            expected_columns = ['position', 'salary', 'experience', 'education', 'city',
                                'district', 'business_area', 'skills', 'company', 'industry', 'company_size']

            for col_name in expected_columns:
                if col_name not in temp_df.columns:
                    temp_df = temp_df.withColumn(col_name, lit(None).cast(StringType()))

            self.df = temp_df.select(expected_columns)
            print("数据加载成功，记录数:", self.df.count())
            print("最终数据列名:", self.df.columns)

        except Exception as e:
            print(f"数据加载失败: {e}")
            import traceback
            traceback.print_exc()
            # Fall back to generated sample data so the app stays usable.
            # NOTE(review): create_sample_data is not visible in this chunk —
            # confirm it is defined elsewhere in the class.
            self.create_sample_data()

    def _detect_and_map_columns(self, df):
        """Auto-detect raw column names and map them to English ones.

        NOTE(review): this method is shadowed by a second definition of
        ``_detect_and_map_columns`` later in this class — Python keeps only
        the last definition, so this body is DEAD CODE and a candidate for
        deletion.
        """
        column_mapping = {}
        actual_columns = df.columns

        print("检测到的列名:", actual_columns)

        # Candidate source names (Chinese and English) per canonical column.
        patterns = {
            'position': ['职位名称', '职位', '岗位', 'title', 'position'],
            'salary': ['薪资', '薪水', '工资', 'salary', '薪酬'],
            'experience': ['工作经验', '经验', '工作年限', 'experience'],
            'education': ['学历要求', '学历', '教育', 'education'],
            'city': ['城市', '工作城市', 'city', '工作地'],
            'district': ['区域', '地区', '行政区', 'district'],
            'business_area': ['商圈', '商业区', 'business'],
            'skills': ['技能要求', '技能', '要求', 'skills'],
            'company': ['公司名称', '公司', '企业', 'company'],
            'industry': ['行业', '所属行业', '产业', 'industry'],
            'company_size': ['公司规模', '规模', '人数', 'size']
        }

        for english_col, possible_names in patterns.items():
            for actual_col in actual_columns:
                # Substring match of the column name against the candidates.
                if any(name in actual_col for name in possible_names if isinstance(name, str)):
                    column_mapping[actual_col] = english_col
                    break
                # Garbled header: try to infer by position (assumes a fixed
                # column order).
                # NOTE(review): this branch runs once per NON-matching column
                # and can overwrite earlier mappings — likely unintended, but
                # unreachable in practice since the method is shadowed (see
                # the docstring above).
                elif len(actual_columns) == len(patterns):
                    idx = list(patterns.keys()).index(english_col)
                    if idx < len(actual_columns):
                        column_mapping[actual_columns[idx]] = english_col

        # Fall back to a fixed Chinese->English table when detection fails.
        if not column_mapping:
            print("自动列名检测失败，使用默认映射")
            default_mapping = {
                '职位名称': 'position',
                '薪资': 'salary',
                '工作经验': 'experience',
                '学历要求': 'education',
                '城市': 'city',
                '区域': 'district',
                '商圈': 'business_area',
                '技能要求': 'skills',
                '公司名称': 'company',
                '行业': 'industry',
                '公司规模': 'company_size'
            }
            for chinese_col, english_col in default_mapping.items():
                if chinese_col in actual_columns:
                    column_mapping[chinese_col] = english_col

        return column_mapping

    def _remove_duplicate_columns(self, df):
        """Drop duplicate column names from *df*, keeping the first occurrence.

        The previous implementation called ``df.drop(name)``, but Spark's
        ``drop`` removes EVERY column matching that name (including the one
        we wanted to keep), and the index-based loop went stale as columns
        were removed. Since duplicated names cannot be selected unambiguously
        by name, we rename all columns to unique positional placeholders,
        select the positions to keep, then restore the original names.
        """
        columns = df.columns
        seen = set()
        keep_indices = []       # positions of the first occurrence of each name
        duplicate_columns = []  # names that appeared more than once

        for idx, col_name in enumerate(columns):
            if col_name in seen:
                duplicate_columns.append(col_name)
            else:
                seen.add(col_name)
                keep_indices.append(idx)

        if duplicate_columns:
            print(f"发现重复列名: {duplicate_columns}")
            # Positional rename -> unambiguous select -> restore real names.
            placeholders = [f"__col_{i}" for i in range(len(columns))]
            df = (df.toDF(*placeholders)
                    .select(*[placeholders[i] for i in keep_indices])
                    .toDF(*[columns[i] for i in keep_indices]))
            print("已删除重复列")

        return df

    def _detect_and_map_columns(self, df):
        """Detect the source column names and map them to canonical English
        names, making sure no source column is mapped more than once.

        Returns a dict of ``{source_column: english_column}``.
        """
        actual_columns = df.columns
        print("检测到的列名:", actual_columns)

        # Candidate source names (Chinese and English) per canonical column.
        patterns = {
            'position': ['职位名称', '职位', '岗位', 'title', 'position'],
            'salary': ['薪资', '薪水', '工资', 'salary', '薪酬'],
            'experience': ['工作经验', '经验', '工作年限', 'experience'],
            'education': ['学历要求', '学历', '教育', 'education'],
            'city': ['城市', '工作城市', 'city', '工作地'],
            'district': ['区域', '地区', '行政区', 'district'],
            'business_area': ['商圈', '商业区', 'business'],
            'skills': ['技能要求', '技能', '要求', 'skills'],
            'company': ['公司名称', '公司', '企业', 'company'],
            'industry': ['行业', '所属行业', '产业', 'industry'],
            'company_size': ['公司规模', '规模', '人数', 'size']
        }

        mapping = {}
        taken = set()  # source columns already consumed by a pattern match

        # Columns that already carry a canonical English name map to themselves.
        existing_english_cols = set(actual_columns) & set(patterns.keys())
        if existing_english_cols:
            print(f"发现已存在的英文列名: {existing_english_cols}")
            mapping.update({name: name for name in existing_english_cols})

        # Match each remaining canonical name against the raw columns,
        # in pattern order, taking the first unclaimed column that matches.
        for target, candidates in patterns.items():
            if target in mapping:
                continue
            match = next(
                (c for c in actual_columns
                 if c not in taken
                 and any(isinstance(n, str) and n in c for n in candidates)),
                None,
            )
            if match is not None:
                mapping[match] = target
                taken.add(match)

        # Detection found nothing at all: fall back to the fixed table.
        if not mapping:
            print("自动列名检测失败，使用默认映射")
            default_mapping = {
                '职位名称': 'position',
                '薪资': 'salary',
                '工作经验': 'experience',
                '学历要求': 'education',
                '城市': 'city',
                '区域': 'district',
                '商圈': 'business_area',
                '技能要求': 'skills',
                '公司名称': 'company',
                '行业': 'industry',
                '公司规模': 'company_size'
            }
            for source, target in default_mapping.items():
                if source in actual_columns and source not in taken:
                    mapping[source] = target
                    taken.add(source)

        return mapping

    def get_salary_stats(self):
        """Parse the raw salary strings and return a DataFrame with numeric
        ``min_salary`` / ``max_salary`` / ``avg_salary`` columns.

        Supports the formats "15-25k", "15k-25k" and bare "15-25"; rows that
        match none of them are dropped. Returns an empty, correctly typed
        DataFrame on failure or when no row parses.
        """
        try:
            # Bail out early when there is no data at all.
            if self.df.count() == 0:
                print("数据框为空，无法解析薪资")
                return self._create_empty_salary_df()

            # Keep only rows that plausibly contain a parsable salary range.
            salary_df = self.df.filter(
                (col("salary").isNotNull()) &
                (col("salary") != "") &
                (
                        col("salary").contains("k") |
                        col("salary").contains("K") |
                        col("salary").rlike(r"\d+\s*-\s*\d+")
                )
            )

            if salary_df.count() == 0:
                print("没有有效的薪资数据")
                return self._create_empty_salary_df()

            # One regex pair (min/max) per supported salary format; an empty
            # extract means "this format did not match".
            salary_processed = salary_df.withColumn(
                # Format 1: "15-25k", "15-25K"
                "min_salary_str1", regexp_extract(col("salary"), r"(\d+)\s*-\s*\d+\s*[kK]", 1)
            ).withColumn(
                "max_salary_str1", regexp_extract(col("salary"), r"\d+\s*-\s*(\d+)\s*[kK]", 1)
            ).withColumn(
                # Format 2: "15k-25k", "15K-25K"
                "min_salary_str2", regexp_extract(col("salary"), r"(\d+)\s*[kK]\s*-\s*\d+\s*[kK]", 1)
            ).withColumn(
                "max_salary_str2", regexp_extract(col("salary"), r"\d+\s*[kK]\s*-\s*(\d+)\s*[kK]", 1)
            ).withColumn(
                # Format 3: "15-25"
                "min_salary_str3", regexp_extract(col("salary"), r"(\d+)\s*-\s*\d+", 1)
            ).withColumn(
                "max_salary_str3", regexp_extract(col("salary"), r"\d+\s*-\s*(\d+)", 1)
            )

            # Pick the first format variant that produced a non-empty match.
            salary_processed = salary_processed.withColumn(
                "min_salary_str",
                when(col("min_salary_str1") != "", col("min_salary_str1"))
                .when(col("min_salary_str2") != "", col("min_salary_str2"))
                .when(col("min_salary_str3") != "", col("min_salary_str3"))
                .otherwise("")
            ).withColumn(
                "max_salary_str",
                when(col("max_salary_str1") != "", col("max_salary_str1"))
                .when(col("max_salary_str2") != "", col("max_salary_str2"))
                .when(col("max_salary_str3") != "", col("max_salary_str3"))
                .otherwise("")
            ).drop("min_salary_str1", "max_salary_str1", "min_salary_str2", "max_salary_str2", "min_salary_str3",
                   "max_salary_str3")

            # Keep only rows where both bounds were extracted.
            valid_salary = salary_processed.filter(
                (col("min_salary_str") != "") &
                (col("max_salary_str") != "") &
                (length(col("min_salary_str")) > 0) &
                (length(col("max_salary_str")) > 0)
            )

            if valid_salary.count() == 0:
                print("没有匹配到有效的薪资格式")
                return self._create_empty_salary_df()

            # Cast to numbers and derive the midpoint salary.
            result = valid_salary.withColumn(
                "min_salary", col("min_salary_str").cast("float")
            ).withColumn(
                "max_salary", col("max_salary_str").cast("float")
            ).withColumn(
                "avg_salary", (col("min_salary") + col("max_salary")) / 2
            ).drop("min_salary_str", "max_salary_str")

            print(f"成功解析 {result.count()} 条薪资数据")
            return result

        except Exception as e:
            print(f"获取薪资统计信息失败: {str(e)}")
            import traceback
            traceback.print_exc()
            return self._create_empty_salary_df()

    def _create_empty_salary_df(self):
        """Return an empty DataFrame carrying the parsed-salary schema."""
        fields = [StructField(name, FloatType(), True)
                  for name in ("min_salary", "max_salary", "avg_salary")]
        return self.spark.createDataFrame([], StructType(fields))

    def get_city_salary_data(self):
        """Box-plot statistics of average salary per city, as pandas.

        Limits output to the 25 cities with the most postings; columns are
        ``city`` / ``median`` / ``q1`` / ``q3`` / ``count``. Always returns a
        DataFrame with these columns, even on failure.
        """
        try:
            salary_df = self.get_salary_stats()

            # No parsed salary data -> nothing to aggregate.
            if salary_df.count() == 0:
                print("没有薪资数据可用于城市分析")
                return pd.DataFrame(columns=['city', 'median', 'q1', 'q3', 'count'])

            # Count postings per (non-empty) city, most frequent first.
            city_counts = salary_df.filter(
                col("city").isNotNull() & (col("city") != "")
            ).groupBy("city").agg(
                count("*").alias("count")
            ).orderBy(col("count").desc())

            # Keep the 25 cities with the most data points.
            top_cities = [row['city'] for row in city_counts.limit(25).collect()]

            if not top_cities:
                print("没有找到足够的城市数据")
                return pd.DataFrame(columns=['city', 'median', 'q1', 'q3', 'count'])

            # Median and quartiles of avg_salary, restricted to those cities.
            city_salary = salary_df.filter(col("city").isin(top_cities)).groupBy("city").agg(
                expr("percentile_approx(avg_salary, 0.5)").alias("median"),
                expr("percentile_approx(avg_salary, 0.25)").alias("q1"),
                expr("percentile_approx(avg_salary, 0.75)").alias("q3"),
                count("*").alias("count")
            ).filter(col("count") > 0).orderBy(col("median").desc())

            result = city_salary.toPandas()

            # Guarantee the caller always gets the expected columns.
            if result.empty:
                print("城市薪资统计结果为空")
                return pd.DataFrame(columns=['city', 'median', 'q1', 'q3', 'count'])

            print(f"箱线图将显示 {len(result)} 个城市的薪资数据")
            return result

        except Exception as e:
            print(f"获取城市薪资数据失败: {str(e)}")
            import traceback
            traceback.print_exc()
            return pd.DataFrame(columns=['city', 'median', 'q1', 'q3', 'count'])

    def get_industry_city_salary(self):
        """Heat-map data: mean average salary per (industry, city) pair.

        Restricted to the 25 best-represented industries and cities; salary
        is divided by 1000 so the unit is "k". Columns are ``industry`` /
        ``city`` / ``avg_salary`` / ``count``; always returned, even on
        failure.
        """
        try:
            salary_df = self.get_salary_stats()

            # No parsed salary data -> nothing to aggregate.
            if salary_df.count() == 0:
                print("没有薪资数据用于行业城市分析")
                return pd.DataFrame(columns=['industry', 'city', 'avg_salary', 'count'])

            # 25 industries with the most postings.
            top_industries = salary_df.filter(
                col("industry").isNotNull() & (col("industry") != "")
            ).groupBy("industry").agg(
                count("*").alias("count")
            ).orderBy(col("count").desc()).limit(25)

            top_industry_list = [row['industry'] for row in top_industries.collect()]

            # 25 cities with the most postings.
            top_cities = salary_df.filter(
                col("city").isNotNull() & (col("city") != "")
            ).groupBy("city").agg(
                count("*").alias("count")
            ).orderBy(col("count").desc()).limit(25)

            top_city_list = [row['city'] for row in top_cities.collect()]

            if not top_industry_list or not top_city_list:
                print("没有足够的行业或城市数据")
                return pd.DataFrame(columns=['industry', 'city', 'avg_salary', 'count'])

            # Aggregate only over the selected top industries and cities.
            industry_city_salary = salary_df.filter(
                col("industry").isin(top_industry_list) &
                col("city").isin(top_city_list)
            ).groupBy("industry", "city").agg(
                # Divide by 1000 to express the salary in "k" units.
                (avg("avg_salary") / 1000).alias("avg_salary"),
                count("*").alias("count")
            ).filter(col("count") > 0)

            result = industry_city_salary.toPandas()

            # Guarantee the caller always gets the expected columns.
            if result.empty:
                print("行业城市薪资统计结果为空")
                return pd.DataFrame(columns=['industry', 'city', 'avg_salary', 'count'])

            print(
                f"热力图将显示 {len(result['industry'].unique())} 个行业和 {len(result['city'].unique())} 个城市的数据")
            print(f"薪资范围: {result['avg_salary'].min():.1f}k - {result['avg_salary'].max():.1f}k")
            return result

        except Exception as e:
            print(f"获取行业城市薪资数据失败: {str(e)}")
            import traceback
            traceback.print_exc()
            return pd.DataFrame(columns=['industry', 'city', 'avg_salary', 'count'])

    def search_positions(self, city=None, industry=None, salary_range=None):
        """Filter job postings by city, industry and salary bracket.

        "全部" (or None) for any argument means "no filter". Returns at most
        1000 rows as a pandas DataFrame; pagination is done by the Flask
        layer. Always returns a frame with the standard columns.
        """
        result_columns = ['position', 'salary', 'experience', 'education', 'city',
                          'district', 'business_area', 'skills', 'company', 'industry', 'company_size']

        query = self.df

        if city and city != "全部":
            query = query.filter(col("city").contains(city))

        if industry and industry != "全部":
            query = query.filter(col("industry").contains(industry))

        # (low, high) bounds in "k" on the minimum advertised salary per bracket;
        # None means the bound is open.
        brackets = {
            "10k以下": (None, 10),
            "10-20k": (10, 20),
            "20-30k": (20, 30),
            "30k以上": (30, None),
        }
        if salary_range and salary_range != "全部" and salary_range in brackets:
            low, high = brackets[salary_range]
            # Only rows whose salary string uses the "Nk"/"NK" notation.
            query = query.filter(col("salary").contains("k") | col("salary").contains("K"))
            min_salary = regexp_extract(col("salary"), r"(\d+)\s*-\s*", 1).cast("int")
            if low is not None and high is not None:
                query = query.filter((min_salary >= low) & (min_salary < high))
            elif high is not None:
                query = query.filter(min_salary < high)
            else:
                query = query.filter(min_salary >= low)

        # Cap the result size; Flask paginates the rest.
        result = query.limit(1000).toPandas()

        # Always hand back a frame with the expected columns, even when empty.
        if result.empty:
            return pd.DataFrame(columns=result_columns)
        return result

    def get_skills_data(self, position_type):
        """Collect the skill strings of positions matching *position_type*
        and return them joined into one text blob (for the word cloud)."""
        try:
            # Expand the well-known position categories into search keywords;
            # anything else is searched for literally.
            keyword_groups = {
                "数据分析": ["数据分析", "数据专员", "数据运营"],
                "BI开发工程师": ["BI", "商业智能", "BI开发", "数据可视化"],
                "游戏测试": ["游戏测试", "测试工程师", "QA", "游戏QA"],
            }
            keywords = keyword_groups.get(position_type, [position_type])

            # OR together one "position contains keyword" predicate per keyword.
            condition = col("position").contains(keywords[0])
            for keyword in keywords[1:]:
                condition = condition | col("position").contains(keyword)

            rows = self.df.filter(condition).select("skills").collect()
            found = [row.skills for row in rows if row.skills]
            return " ".join(found) if found else ""
        except Exception as e:
            print(f"获取技能数据失败: {e}")
            return ""

    def get_education_data(self):
        """Return the number of postings per education level as pandas."""
        try:
            dist = (self.df
                    .groupBy("education")
                    .agg(count("*").alias("count"))
                    .toPandas())
            # Always hand back the expected columns, even with no data.
            return dist if not dist.empty else pd.DataFrame(columns=['education', 'count'])
        except Exception as e:
            print(f"获取学历数据失败: {e}")
            return pd.DataFrame(columns=['education', 'count'])

    def get_industry_education_data(self):
        """Education-level breakdown for the 15 best-represented industries.

        Returns a pandas DataFrame with ``industry`` / ``education`` /
        ``count`` columns (possibly empty on failure or missing data).
        """
        try:
            # Top 15 industries by posting volume.
            top_rows = (self.df
                        .filter(col("industry").isNotNull() & (col("industry") != ""))
                        .groupBy("industry")
                        .agg(count("*").alias("count"))
                        .orderBy(col("count").desc())
                        .limit(15)
                        .collect())
            top_industries = [row['industry'] for row in top_rows]

            if not top_industries:
                return pd.DataFrame(columns=['industry', 'education', 'count'])

            # Education distribution restricted to those industries.
            breakdown = (self.df
                         .filter(col("industry").isin(top_industries) &
                                 col("education").isNotNull() & (col("education") != ""))
                         .groupBy("industry", "education")
                         .agg(count("*").alias("count"))
                         .toPandas())

            if breakdown.empty:
                return pd.DataFrame(columns=['industry', 'education', 'count'])

            print(f"堆叠柱状图将显示 {len(breakdown['industry'].unique())} 个行业的学历分布")
            return breakdown

        except Exception as e:
            print(f"获取行业学历数据失败: {e}")
            return pd.DataFrame(columns=['industry', 'education', 'count'])

    def train_salary_model(self):
        """Train the salary prediction model on the parsed salary data.

        Returns the trained model. Note: the model is NOT persisted here —
        this only prepares the output directory; call ``save_model()`` to
        actually write it to disk.

        Raises: whatever the underlying trainer raised, after logging it.
        """
        try:
            salary_df = self.get_salary_stats()
            model = self.salary_model.train(salary_df)

            # Make sure the model output directory exists for a later save.
            os.makedirs(config.MODEL_PATH, exist_ok=True)
            print(f"模型将保存到: {config.MODEL_PATH}")

            return model
        except Exception as e:
            print(f"训练薪资模型失败: {e}")
            # Bare raise re-raises the active exception with its original
            # traceback intact (clearer than `raise e`).
            raise

    def predict_salary(self, city, industry, education, experience, company_size):
        """Predict a salary for the given features using the pre-trained model.

        Returns a string like "12.5k", or a Chinese error message when the
        model is unavailable or prediction fails.
        """
        try:
            if not self.model_loaded:
                return "模型未加载，请先运行 train_model.py 训练模型"
            predicted = self.salary_model.predict(
                city, industry, education, experience, company_size
            )
            return f"{predicted}k"  # value is expressed in thousands (k)
        except Exception as e:
            print(f"预测错误: {e}")
            return "预测失败，请检查模型状态"

    def save_model(self, model_path):
        """Persist the trained salary model under *model_path*.

        Creates the directory if needed; re-raises on failure after logging.
        """
        try:
            os.makedirs(model_path, exist_ok=True)  # ensure the target dir exists
            self.salary_model.save_model(model_path)
        except Exception as e:
            print(f"保存模型失败: {e}")
            raise e
        self.model_loaded = True
        print(f"模型已保存到: {model_path}")

    def load_model(self, model_path):
        """Load a persisted salary model.

        Converts *model_path* to a proper ``file://`` URI before delegating
        to the underlying loader. Returns True on success, False on failure,
        and updates ``self.model_loaded`` accordingly.
        """
        from pathlib import Path  # local import: only needed here

        try:
            if not model_path.startswith("file://"):
                # Path.as_uri() builds a correct URI on every platform
                # (file:///C:/... on Windows, file:///home/... on POSIX).
                # The previous "file:///" + abspath concatenation produced a
                # malformed file://// URI on POSIX systems.
                model_path = Path(model_path).resolve().as_uri()

            print(f"加载模型路径: {model_path}")
            self.salary_model.load_model(model_path)
            self.model_loaded = True
            return True
        except Exception as e:
            print(f"加载模型失败: {e}")
            self.model_loaded = False
            return False

    def _fix_column_names(self, df):
        """Repair mojibake column names.

        If the header decoded badly ('?' or U+FFFD replacement characters),
        fall back to renaming by position, assuming the standard 11-column
        layout; otherwise map names via ``_detect_and_map_columns``. Returns
        the (possibly renamed) DataFrame.
        """
        actual_columns = df.columns
        print("检测到的原始列名:", actual_columns)

        # NOTE: loop variables here must not be named `col` — the original
        # code did that and shadowed pyspark.sql.functions.col, which is
        # star-imported at module level.
        garbled = any('?' in name or '�' in name for name in actual_columns)

        if garbled or len(actual_columns) == 0:
            print("检测到乱码列名，尝试修复...")

            if len(actual_columns) == 11:
                # Assume the standard 11-column order and rename positionally.
                standard_columns = ['position', 'salary', 'experience', 'education', 'city',
                                    'district', 'business_area', 'skills', 'company', 'industry', 'company_size']
                for original_name, new_name in zip(actual_columns, standard_columns):
                    df = df.withColumnRenamed(original_name, new_name)
                print("已根据列顺序重命名列")
            else:
                print(f"列数不匹配: 期望11列，实际{len(actual_columns)}列")
        else:
            # Readable header: map names via pattern matching.
            column_mapping = self._detect_and_map_columns(df)
            for original_col, english_col in column_mapping.items():
                if original_col in df.columns:
                    df = df.withColumnRenamed(original_col, english_col)
                    print(f"重命名列: {original_col} -> {english_col}")

        return df