import pandas as pd
import numpy as np
from pulp import LpMaximize, LpProblem, LpVariable, lpSum, value
import time
import random


def generate_random_data(num_applicants, num_positions):
    """Build two random test tables: one of applicants, one of positions.

    Applicants carry a written-exam score in [270, 300), four 1-4 interview
    ratings, and two preferred departments; positions carry four 1-4 skill
    requirements plus five 1-4 attractiveness ratings.

    Returns a (applicants DataFrame, positions DataFrame) pair.
    """
    def ratings(size):
        # Uniform integer ratings in [1, 4].
        return np.random.randint(1, 5, size).tolist()

    def random_department():
        return f'部门{np.random.randint(1, num_positions + 1)}'

    applicants = pd.DataFrame({
        '应聘者': [str(k) for k in range(1, num_applicants + 1)],
        '笔试成绩': np.random.randint(270, 300, num_applicants).tolist(),
        '知识面评分': ratings(num_applicants),
        '理解能力评分': ratings(num_applicants),
        '应变能力评分': ratings(num_applicants),
        '表达能力评分': ratings(num_applicants),
        '意愿1': [random_department() for _ in range(num_applicants)],
        '意愿2': [random_department() for _ in range(num_applicants)],
    })

    positions = pd.DataFrame({
        '用人部门': [f'部门{k}' for k in range(1, num_positions + 1)],
        '知识面要求': ratings(num_positions),
        '理解能力要求': ratings(num_positions),
        '应变能力要求': ratings(num_positions),
        '表达能力要求': ratings(num_positions),
        '福利待遇': ratings(num_positions),
        '工作条件': ratings(num_positions),
        '劳动强度': ratings(num_positions),
        '晋升机会': ratings(num_positions),
        '深造机会': ratings(num_positions),
    })

    return applicants, positions


def calculate_match(applicant, position, alpha=0.5, beta=0.1, gamma=0.2,
                    exam_min=None, exam_max=None):
    """Compute a weighted match score between one applicant and one position.

    The score combines:
      * ``alpha`` times a normalized, quadratically boosted written-exam score;
      * minus ``(1 - alpha)`` times the total absolute gap between the
        applicant's four interview ratings and the position's requirements;
      * a flat ``beta * 100`` bonus when the position is one of the
        applicant's two preferred departments (意愿1 / 意愿2);
      * ``gamma`` times the sum of the position's five attractiveness ratings.

    Parameters
    ----------
    applicant, position : mapping-like rows (e.g. pandas Series from iterrows).
    alpha, beta, gamma : weighting factors.
    exam_min, exam_max : optional normalization bounds for 笔试成绩.  When
        omitted they fall back to the module-level ``df_applicants`` frame —
        NOTE(review): that silently normalizes against the SMALL data set even
        when scoring large-scale rows; pass explicit bounds to avoid it.

    Returns
    -------
    float
        The match score (unbounded; higher is better).
    """
    # Backward-compatible fallback to the module-level applicants frame.
    if exam_min is None:
        exam_min = min(df_applicants['笔试成绩'])
    if exam_max is None:
        exam_max = max(df_applicants['笔试成绩'])

    spread = exam_max - exam_min
    # Guard the degenerate case where every exam score is identical
    # (previously raised ZeroDivisionError).
    exam_score = (applicant['笔试成绩'] - exam_min) / spread if spread else 0.0
    exam_score *= 1 + 2 * exam_score  # quadratic boost favors high relative scores

    match = (
            alpha * exam_score -
            (1 - alpha) * (
                    abs(applicant['知识面评分'] - position['知识面要求']) +
                    abs(applicant['理解能力评分'] - position['理解能力要求']) +
                    abs(applicant['应变能力评分'] - position['应变能力要求']) +
                    abs(applicant['表达能力评分'] - position['表达能力要求'])
            )
    )

    # BUG FIX: the original compared ``position.name`` (the integer row index
    # produced by DataFrame.iterrows) with department-name strings such as
    # '部门1', so the preference bonus could never fire.  Compare the actual
    # department-name column instead.
    if position['用人部门'] in (applicant['意愿1'], applicant['意愿2']):
        match += beta * 100

    match += gamma * (
            position['福利待遇'] +
            position['工作条件'] +
            position['劳动强度'] +
            position['晋升机会'] +
            position['深造机会']
    )
    return match


def create_match_matrix(df_applicants, df_positions, alpha, beta, gamma):
    """Score every (applicant, position) pair.

    Returns a DataFrame indexed by 应聘者 with one column per 用人部门,
    where each cell is the calculate_match score for that pair.
    """
    score_rows = [
        [calculate_match(person, post, alpha, beta, gamma)
         for _, post in df_positions.iterrows()]
        for _, person in df_applicants.iterrows()
    ]
    return pd.DataFrame(score_rows,
                        columns=df_positions['用人部门'],
                        index=df_applicants['应聘者'])


def solve_lp_problem(df_applicants, df_positions, alpha, beta, gamma, num_selected):
    """Assign applicants to departments via a binary LP, maximizing total match.

    Constraints: each applicant fills at most one position, every department
    receives at least one hire, and exactly ``num_selected`` applicants are
    hired overall.

    Returns (objective value, {department: [applicant ids]}).
    """
    scores = create_match_matrix(df_applicants, df_positions, alpha, beta, gamma)
    people = df_applicants['应聘者']
    depts = df_positions['用人部门']

    model = LpProblem("RecruitmentProblem", LpMaximize)
    assign = LpVariable.dicts("x", (people, depts), cat='Binary')

    # Objective: total match score over all selected (applicant, dept) pairs.
    model += lpSum(scores.loc[p, d] * assign[p][d] for p in people for d in depts)

    # An applicant can be placed in at most one department.
    for p in people:
        model += lpSum(assign[p][d] for d in depts) <= 1

    # Every department must receive at least one hire.
    for d in depts:
        model += lpSum(assign[p][d] for p in people) >= 1

    # Exactly num_selected applicants are hired in total.
    model += lpSum(assign[p][d] for p in people for d in depts) == num_selected

    model.solve()
    objective_value = value(model.objective)

    # Collect the selected applicants per department.
    chosen = {d: [] for d in depts}
    for p in people:
        for d in depts:
            if assign[p][d].varValue == 1:
                chosen[d].append(p)

    return objective_value, chosen


# 生成测试数据 — one small instance and one large instance.
df_applicants, df_positions = generate_random_data(50, 7)
df_applicants_large, df_positions_large = generate_random_data(4000, 20)

# 网格搜索 — sweep (beta, gamma) with alpha fixed and keep the assignment
# with the highest total match score on the small instance.
alpha = 0.5
beta_values = [0.05, 0.1, 0.15, 0.2]
gamma_values = [0.1, 0.2, 0.3, 0.4]
best_score = -np.inf
best_params = (None, None)
best_allocation = None

for beta in beta_values:
    for gamma in gamma_values:
        score, allocation = solve_lp_problem(df_applicants, df_positions, alpha, beta, gamma, num_selected=8)
        if score > best_score:
            best_score, best_params, best_allocation = score, (beta, gamma), allocation

print("Best score for small scale:", best_score)
print("Best beta and gamma for small scale:", best_params)
print("Best allocation for small scale:", best_allocation)

# 打印小规模分配结果 — one line per department.
print("\nSmall scale allocation result:")
for dept, applicants in best_allocation.items():
    print(f"{dept}: {', '.join(applicants)}")

# 对大规模数据集进行网格搜索
# BUG FIX: the original printed the SMALL-scale best_params here and never
# recorded which (beta, gamma) produced the large-scale best score.  Track
# the large-scale winner in its own variable.
best_score_large = -np.inf
best_params_large = (None, None)
best_allocation_large = None

for beta in beta_values:
    for gamma in gamma_values:
        score, allocation = solve_lp_problem(df_applicants_large, df_positions_large, alpha, beta, gamma,
                                             num_selected=22)
        if score > best_score_large:
            best_score_large = score
            best_params_large = (beta, gamma)
            best_allocation_large = allocation

print("\nBest score for large scale:", best_score_large)
print("Best beta and gamma for large scale:", best_params_large)
print("Best allocation for large scale:", best_allocation_large)

# 打印大规模分配结果 — one line per department.
print("\nLarge scale allocation result:")
for dept, applicants in best_allocation_large.items():
    print(f"{dept}: {', '.join(applicants)}")

# 评估模型在不同时间下的消耗
def evaluate_performance(df_applicants, df_positions, alpha, beta, gamma, num_selected):
    """Solve the assignment LP once and measure how long it takes.

    Returns (objective score, wall-clock duration in seconds).  The
    allocation itself is discarded — this helper only times the solve.
    """
    # perf_counter is monotonic and high-resolution; time.time() can jump
    # backwards/forwards if the system clock is adjusted mid-measurement.
    start_time = time.perf_counter()
    score, _allocation = solve_lp_problem(df_applicants, df_positions, alpha, beta, gamma, num_selected)
    duration = time.perf_counter() - start_time
    return score, duration


# 性能评估 — time one solve on each instance size.
# NOTE(review): beta and gamma here are whatever values the earlier grid-search
# loops left behind (their last iteration), NOT the best parameters — confirm
# that this is intended before trusting these timings.
small_scale_score, small_duration = evaluate_performance(df_applicants, df_positions, alpha, beta, gamma,
                                                         num_selected=8)
large_scale_score, large_duration = evaluate_performance(df_applicants_large, df_positions_large, alpha, beta, gamma,
                                                         num_selected=22)

print("Small scale duration:", small_duration)
print("Large scale duration:", large_duration)

# 灵敏度分析 — sweep all three weights on the small instance and record
# score and runtime for each combination.  Plain for-loops are kept on
# purpose: the loop variables (alpha, beta, gamma) intentionally remain
# bound after the sweep and are reused further down.
alpha_values = [0.4, 0.5, 0.6]
beta_values = [0.1, 0.15, 0.2]
gamma_values = [0.1, 0.15, 0.2]
sensitivity_results = []

for alpha in alpha_values:
    for beta in beta_values:
        for gamma in gamma_values:
            score, duration = evaluate_performance(df_applicants, df_positions, alpha, beta, gamma, num_selected=8)
            sensitivity_results.append({
                'alpha': alpha,
                'beta': beta,
                'gamma': gamma,
                'score': score,
                'duration': duration,
            })

df_sensitivity = pd.DataFrame(sensitivity_results)
print(df_sensitivity)


# 误差分析
def add_noise(data, noise_level=5):
    """Return a new list with uniform noise added to every element of *data*.

    Parameters
    ----------
    data : iterable of numbers
        The clean values (e.g. exam scores).
    noise_level : float, default 5
        Half-width of the uniform noise interval; each element is shifted by
        an independent draw from [-noise_level, +noise_level].

    Returns
    -------
    list of float
        Perturbed copies of the input values; the input is not modified.
    """
    # Comprehension replaces the manual append loop — same semantics.
    return [score + random.uniform(-noise_level, noise_level) for score in data]


# 在笔试成绩中添加误差 — perturb the exam scores in place, then re-solve
# to see how measurement error shifts the objective.
df_applicants['笔试成绩'] = add_noise(df_applicants['笔试成绩'])

noisy_score, noisy_duration = evaluate_performance(df_applicants, df_positions, alpha, beta, gamma, num_selected=8)

print("Noisy score:", noisy_score)
print("Noisy duration:", noisy_duration)
