import numpy as np
import copy
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns


class LogisticMap:
    """Chaotic pseudo-random number generator based on the logistic map
    x_{n+1} = r * x_n * (1 - x_n)."""

    def __init__(self, initial_value=0.1, r=4):
        """
        Initialize the LogisticMap generator.

        Args:
            initial_value (float): Initial value x (0 < initial_value < 1).
            r (float): System parameter (0 < r <= 4); r = 4 gives fully
                chaotic behaviour over (0, 1).
        """
        self.x = initial_value
        self.r = r

    def next(self, lo: float = 0, hi: float = 1):
        """Advance the map one step and return the new value scaled into [lo, hi].

        Bug fix: the previous version advanced the map but then returned
        np.random.uniform(lo, hi), so the logistic-map state was never
        actually used as the random source.
        """
        self.x = self.r * self.x * (1 - self.x)
        return lo + (hi - lo) * self.x

    def uniform(self, lo: float = 0, hi: float = 1, size: int = 1):
        """Generate and return a list of `size` values scaled into [lo, hi]."""
        return [self.next(lo, hi) for _ in range(size)]


random = LogisticMap(0.1, 4)


# Parse the input data
def parse_input(input_data):
    """
    Parse a flexible job-shop problem description.

    Expected format:
        line 1: "<num_jobs> <num_machines>"
        lines 2..num_jobs+1, one per job:
            <num_operations> then, for each operation,
            <num_machine_options> followed by that many
            "<machine_index> <processing_time>" pairs.

    Args:
        input_data (str): Input data formatted as described.

    Returns:
        tuple: (num_jobs, num_machines, job_data, priority_data) where
            job_data[j][o] is the list of (machine, processing_time)
            options for operation o of job j, and priority_data[j][o] is
            the same list sorted by processing time (ties broken by
            machine index).
    """
    lines = input_data.strip().split("\n")
    num_jobs, num_machines = map(int, lines[0].split())

    job_data = []
    priority_data = []
    # Read exactly num_jobs job lines (the original iterated over every
    # remaining line and would crash on trailing non-numeric content).
    for i in range(1, num_jobs + 1):
        line = list(map(int, lines[i].split()))
        num_operations = line[0]
        operations, priorities = [], []
        idx = 1

        for _ in range(num_operations):
            num_machines_for_op = line[idx]
            idx += 1
            machines = []

            for _ in range(num_machines_for_op):
                machine, processing_time = line[idx], line[idx + 1]
                machines.append((machine, processing_time))
                idx += 2

            operations.append(machines)
            # Sort by processing time (primary), then machine index (secondary).
            priorities.append(sorted(machines, key=lambda x: (x[1], x[0])))

        job_data.append(operations)
        priority_data.append(priorities)

    return num_jobs, num_machines, job_data, priority_data


def qpso(
    data: str,
    num_particles=500,
    max_iterations: int = 500,
    beta_max: float = 1,
    beta_min: float = 0.5,
    MAXT=5,
):
    """Quantum-behaved particle swarm optimisation for flexible job-shop scheduling.

    Args:
        data (str): Problem instance in the format accepted by parse_input().
        num_particles (int): Swarm size.
        max_iterations (int): Number of QPSO iterations.
        beta_max (float): Initial contraction-expansion factor.
        beta_min (float): Final contraction-expansion factor (linearly annealed).
        MAXT (int): Stagnation threshold; a particle that fails to improve its
            personal best for roughly this many iterations is mutated.

    Side effects: prints per-iteration progress and the best makespan, and
    shows a Gantt chart of the best schedule found.
    """
    num_jobs, num_machines, job_data, priority_data = parse_input(data)
    job_operations = [len(operations) for operations in job_data]
    num_operations = sum(job_operations)
    max_num_operations = max(job_operations)

    def initialize_particles(size):
        """Create `size` particles with one gene per operation.

        A gene's integer part indexes that operation's machine-priority list;
        its fractional part is later used as a sequencing key.
        Bug fix: the original appended one gene per *machine option*
        (len(machine_options) genes per operation), but decoding consumes
        exactly one gene per operation, leaving a dead tail on every particle.
        """
        particles = []
        for _ in range(size):
            particle = []
            for operations_of_job in job_data:
                for machine_options in operations_of_job:
                    particle.append(random.next(0, len(machine_options) - 1))
            particles.append(particle)
        return np.array(particles)

    def decode_particle_to_schedule(particle):
        """Translate a particle into per-machine lists of operations.

        Genes are clamped in place to the valid index range; the integer part
        picks a machine from the priority list, the fractional part orders
        operations assigned to the same machine (larger fraction first).
        """
        machine_schedule = [[] for _ in range(num_machines)]

        particle_index = 0
        for job_idx, operations_of_job in enumerate(job_data):
            for op_idx in range(len(operations_of_job)):
                priority = priority_data[job_idx][op_idx]

                # Clamp the gene into [0, len(priority) - 1] (in place).
                particle[particle_index] = max(
                    0, min(particle[particle_index], len(priority) - 1)
                )
                pos = particle[particle_index]
                int_part = int(pos)
                frac_part = float(pos - int_part)

                selected_machine, processing_time = priority[int_part]
                machine_schedule[selected_machine].append(
                    (frac_part, (job_idx, op_idx, processing_time))
                )
                particle_index += 1

        for machine, operations in enumerate(machine_schedule):
            # Higher fractional part = higher dispatch priority on the machine.
            operations.sort(key=lambda x: -x[0])
            machine_schedule[machine] = [op[1] for op in operations]

        return machine_schedule

    def simulate(machine_schedule):
        """Run the schedule forward in time.

        Returns:
            (events, machine_finish_times): events is a list of
            (job_idx, op_idx, machine_idx, start_time, finish_time) tuples
            in scheduling order; machine_finish_times[m] is the completion
            time of machine m.  (Shared by calculate_fitness and
            plot_gantt_chart, which previously duplicated this logic.)
        """
        machine_finish_times = [0] * num_machines
        op_finish_times = [[0] * count for count in job_operations]
        info = [[None] * count for count in job_operations]
        for machine_idx, operations in enumerate(machine_schedule):
            for job_idx, op_idx, processing_time in operations:
                info[job_idx][op_idx] = (machine_idx, processing_time)

        events = []
        # Respect precedence: operation k of every job before operation k + 1.
        for op_idx in range(max_num_operations):
            for job_idx in range(num_jobs):
                if op_idx >= job_operations[job_idx]:
                    continue
                machine_idx, processing_time = info[job_idx][op_idx]
                # Start only once the machine is free AND the job's previous
                # operation (if any) has finished.
                start_time = max(
                    machine_finish_times[machine_idx],
                    op_finish_times[job_idx][op_idx - 1] if op_idx > 0 else 0,
                )
                finish_time = start_time + processing_time
                op_finish_times[job_idx][op_idx] = finish_time
                machine_finish_times[machine_idx] = max(
                    machine_finish_times[machine_idx], finish_time
                )
                events.append(
                    (job_idx, op_idx, machine_idx, start_time, finish_time)
                )
        return events, machine_finish_times

    def calculate_fitness(machine_schedule):
        """Fitness = makespan (latest machine finish time)."""
        _, machine_finish_times = simulate(machine_schedule)
        return max(machine_finish_times)

    def mutate_particle(particle):
        """Mutate a stagnant particle by swapping two random genes."""
        idx = np.random.choice(num_operations, 2, replace=False)
        particle[idx[0]], particle[idx[1]] = particle[idx[1]], particle[idx[0]]
        return particle

    def plot_gantt_chart(machine_schedule):
        """Show a Gantt chart of the given schedule (one row per machine)."""
        print(machine_schedule)

        fig, ax = plt.subplots(figsize=(12, 6))
        machine_colors = plt.cm.tab20.colors  # one colour per job (cycled)
        machine_height = 0.6  # height of each machine's bar
        max_time = 0  # track makespan for the X-axis limit

        events, _ = simulate(machine_schedule)
        for job_idx, op_idx, machine_idx, start_time, finish_time in events:
            processing_time = finish_time - start_time
            ax.add_patch(
                mpatches.Rectangle(
                    (start_time, machine_idx - machine_height / 2),
                    processing_time,
                    machine_height,
                    facecolor=machine_colors[job_idx % len(machine_colors)],
                    edgecolor="black",
                )
            )
            ax.text(
                start_time + processing_time / 2,
                machine_idx,
                f"{job_idx + 1}-{op_idx + 1}",
                ha="center",
                va="center",
                fontsize=8,
                color="black",
            )
            max_time = max(max_time, finish_time)

        # Set the chart limits and labels.
        ax.set_xlim(0, max_time + 5)
        ax.set_ylim(-0.5, num_machines - 0.5)
        ax.set_yticks(range(num_machines))
        ax.set_yticklabels(
            [f"M{machine_idx + 1}" for machine_idx in range(num_machines)]
        )
        ax.set_xlabel("Makespan")
        ax.set_ylabel("Machine")
        ax.set_title("Gantt Chart for Flexible Job Shop Scheduling")
        ax.grid(axis="x", linestyle="--", alpha=0.7)

        plt.show()

    # Initialize the swarm.
    particles = initialize_particles(num_particles)
    pbest_positions = copy.deepcopy(particles)
    pbest_scores = np.full(num_particles, float("inf"))
    gbest_position = None
    gbest_score = float("inf")
    gbest_scores = []
    DELTA = np.zeros(num_particles)  # per-particle stagnation counters

    for iteration in range(max_iterations):
        # Decode every particle and evaluate its makespan.
        schedules = [decode_particle_to_schedule(p) for p in particles]
        fitness_scores = np.array([calculate_fitness(s) for s in schedules])

        # Update personal bests; count iterations without improvement.
        for i in range(num_particles):
            if fitness_scores[i] < pbest_scores[i]:
                pbest_scores[i] = fitness_scores[i]
                pbest_positions[i] = particles[i].copy()
                DELTA[i] = 0
            else:
                DELTA[i] += 1

        # Update the global best.
        best_particle_index = np.argmin(fitness_scores)
        if fitness_scores[best_particle_index] < gbest_score:
            gbest_score = fitness_scores[best_particle_index]
            gbest_position = particles[best_particle_index].copy()

        # Mutate particles that have stagnated for ~MAXT iterations.
        for i in range(num_particles):
            if DELTA[i] >= random.next(0, MAXT):
                particles[i] = mutate_particle(particles[i])
                DELTA[i] = 0

        # mbest: per-dimension mean of the personal-best positions.
        # Bug fix: the original used axis=1, i.e. the mean over each
        # particle's own genes (one scalar per particle), instead of the
        # standard QPSO mean over the swarm for each dimension.
        M_best = np.mean(pbest_positions, axis=0)
        # Linearly anneal the contraction-expansion factor.
        beta = beta_max - (beta_max - beta_min) * iteration / max_iterations

        # QPSO position update around the local attractor P.
        for i, particle in enumerate(particles):
            k, mu, theta = random.uniform(0, 1, 3)
            k_sign = -1 if k >= 0.5 else 1
            for dim in range(num_operations):
                P = theta * pbest_positions[i][dim] + (1 - theta) * gbest_position[dim]
                particle[dim] = P + k_sign * beta * np.abs(
                    M_best[dim] - particle[dim]
                ) * np.log(1 / mu)

        gbest_scores.append(gbest_score)
        print(
            f"Iteration {iteration + 1}/{max_iterations}, Best Makespan: {gbest_score}"
        )

    plot_gantt_chart(decode_particle_to_schedule(gbest_position))
    print(f"Best Makespan: {gbest_score}")


if __name__ == "__main__":
    # Run QPSO on the instance in ./data.txt only when executed as a script,
    # so importing this module no longer triggers a full optimisation run.
    with open("./data.txt", "r", encoding="utf-8") as file:
        qpso(data=file.read(), num_particles=500, max_iterations=500, MAXT=5)
