import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
import json
import os
import time
import torch.nn.functional as F
from textwrap import wrap

# --- Global Configuration ---
# Seed every RNG in use (torch, numpy, random) so runs are reproducible.
SEED = 123
torch.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# --- Environment Parameters ---
AREA_SIZE = 100.0  # side length of the square operating area (meters)
NUM_USERS = 8  # number of ground users
MAX_STEPS = 100  # hard cap on environment steps per episode
MAX_DISTANCE_COLLECT = 10.0  # a task is collected within this radius of its user (m)
UAV_SPEED_OPTIONS = {"low": 5.0, "medium": 10.0, "high": 15.0}  # distance covered per step (m)
BASE_STATION_POS = np.array([AREA_SIZE / 2, AREA_SIZE / 2])  # center of the area

# ==============================================================================
# VVVVVVVVVVVVVVVVVVVVVV  Key Parameter Adjustments VVVVVVVVVVVVVVVVVVVVVVVV
# ==============================================================================
# --- StarPO-S Training Parameters ---
SFT_DATASET_SIZE = 1000  # number of teacher rollout episodes used to build SFT data
SFT_DATA_FILE = "starpo_sft_data_final_en.json"
STUDENT_MODEL_FILE = "starpo_student_model_final_en.pth"
VOCAB_SIZE = 4096  # character-level vocabulary: raw Unicode code points < 4096
EMBED_DIM = 256  # transformer model width
NUM_HEADS = 8  # attention heads
NUM_LAYERS = 6  # encoder layers and decoder layers (each)
EPOCHS = 30
BATCH_SIZE = 16
LEARNING_RATE = 1e-4
# ==============================================================================
# ^^^^^^^^^^^^^^^^^^^^^^^^  End of Adjustments ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================

os.makedirs("starpo_results_en", exist_ok=True)  # output dir for rendered frames


class UAVEnv:
    """2D single-UAV data-collection environment.

    The UAV starts at the origin and moves toward semantic targets (a user
    index or an explicit coordinate) at one of the discrete speeds in
    UAV_SPEED_OPTIONS. Any pending task within MAX_DISTANCE_COLLECT of the
    UAV is collected. The state is exposed as an English text description
    so an LLM-style policy can consume it directly.
    """

    def __init__(self, dynamic=True):
        # dynamic=True: each reset() activates a random subset of users;
        # dynamic=False: every user has a pending task on reset().
        self.dynamic = dynamic
        self.user_positions = self._generate_user_positions()
        self.reset()

    def _generate_user_positions(self):
        """Sample NUM_USERS positions, cycling users through the four quadrants.

        Returns:
            (NUM_USERS, 2) float ndarray of user coordinates.
        """
        positions = []
        for i in range(NUM_USERS):
            quadrant = i % 4
            if quadrant == 0:
                x_range, y_range = (0, AREA_SIZE / 2), (AREA_SIZE / 2, AREA_SIZE)
            elif quadrant == 1:
                x_range, y_range = (AREA_SIZE / 2, AREA_SIZE), (AREA_SIZE / 2, AREA_SIZE)
            elif quadrant == 2:
                x_range, y_range = (0, AREA_SIZE / 2), (0, AREA_SIZE / 2)
            else:
                x_range, y_range = (AREA_SIZE / 2, AREA_SIZE), (0, AREA_SIZE / 2)
            # BUG FIX: the original drew BOTH coordinates from x_range
            # (y_range was computed but never used), so users could land
            # outside their intended quadrant. Sample each axis from its
            # own range.
            positions.append(np.array([
                np.random.uniform(x_range[0], x_range[1]),
                np.random.uniform(y_range[0], y_range[1]),
            ]))
        return np.array(positions)

    def reset(self):
        """Reset the UAV to the origin and re-draw which users have tasks.

        Returns:
            The initial textual state description.
        """
        self.uav_position = np.array([0.0, 0.0])
        self.step_count = 0
        if self.dynamic:
            # Between 4 and NUM_USERS users get a pending task this episode.
            num_active = np.random.randint(4, NUM_USERS + 1)
            active_indices = np.random.choice(NUM_USERS, num_active, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[active_indices] = True
        else:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.trajectory = [self.uav_position.copy()]
        return self.get_state_description()

    def get_state_description(self):
        """Render the current state as the English prompt fed to the policy."""
        uncollected_users = []
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                dist = np.linalg.norm(self.uav_position - self.user_positions[i])
                uncollected_users.append(
                    f"  - User {i}: Position ({self.user_positions[i][0]:.1f}, {self.user_positions[i][1]:.1f}), distance {dist:.1f}m")
        uncollected_str = "\n".join(uncollected_users) if uncollected_users else "  - None"
        bs_dist = np.linalg.norm(self.uav_position - BASE_STATION_POS)
        description = (
            f"Environment State:\n"
            f"- UAV Position: ({self.uav_position[0]:.1f}, {self.uav_position[1]:.1f})\n"
            f"- Base Station Position: ({BASE_STATION_POS[0]:.1f}, {BASE_STATION_POS[1]:.1f}), distance {bs_dist:.1f}m\n"
            f"- Remaining Steps: {MAX_STEPS - self.step_count}\n"
            f"- Users with pending tasks:\n{uncollected_str}\n"
            f"Please output the action JSON."
        )
        return description

    def step(self, semantic_action):
        """Execute one semantic action and advance the environment one step.

        Args:
            semantic_action: dict like {"target_type": "user"|"position",
                "target_id": int, "target_coords": [x, y], "speed": key},
                or None/falsy to hover in place.

        Returns:
            (state_description, reward, done, info) where reward is
            +50 per newly collected task minus a per-step cost of 1.
        """
        target_pos = self.uav_position  # Default to hover
        speed = UAV_SPEED_OPTIONS["medium"]  # default when action is missing/invalid
        if semantic_action:  # Check if action is not None
            target_type = semantic_action.get("target_type")
            target_id = semantic_action.get("target_id", -1)
            speed_key = semantic_action.get("speed", "medium")
            # BUG FIX: the original re-indexed UAV_SPEED_OPTIONS[speed_key]
            # in the movement code below, raising KeyError for any unknown
            # speed key (and UnboundLocalError when semantic_action was
            # falsy). Resolve the speed safely once, here.
            speed = UAV_SPEED_OPTIONS.get(speed_key, UAV_SPEED_OPTIONS["medium"])

            if target_type == "user" and 0 <= target_id < NUM_USERS:
                target_pos = self.user_positions[target_id]
            elif target_type == "position":
                target_pos = np.array(semantic_action.get("target_coords", self.uav_position))

        direction = target_pos - self.uav_position
        dist = np.linalg.norm(direction)
        if dist > 1e-6:
            # Move toward the target, but never overshoot it.
            move_dist = min(speed, dist)
            self.uav_position += (direction / dist) * move_dist

        self.trajectory.append(self.uav_position.copy())
        self.step_count += 1
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if np.linalg.norm(self.uav_position - self.user_positions[i]) <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
        reward = newly_collected * 50 - 1
        total_required = np.sum(self.task_generating_users)
        collected_required = np.sum(self.collected_tasks[self.task_generating_users])
        # Episode ends on the step budget or when every active task is collected.
        done = (self.step_count >= MAX_STEPS) or (total_required > 0 and collected_required == total_required)
        info = {
            "collected": collected_required,
            "total": total_required,
            "completion_rate": collected_required / total_required if total_required > 0 else 1.0,
        }
        return self.get_state_description(), reward, done, info

    def render(self, student_action, episode, step):
        """Save a PNG snapshot of the scene and the chosen action to
        starpo_results_en/ep{episode}_step{step}.png."""
        plt.figure(figsize=(10, 12))
        ax = plt.subplot(1, 1, 1)
        ax.scatter(BASE_STATION_POS[0], BASE_STATION_POS[1], s=200, c='orange', marker='s', label='Base Station')
        active_users_mask = self.task_generating_users & ~self.collected_tasks
        collected_users_mask = self.collected_tasks
        # All users drawn faintly first; active/collected re-drawn on top.
        ax.scatter(self.user_positions[:, 0], self.user_positions[:, 1], s=100, c='gray', alpha=0.3,
                   label='Inactive/No Task Users')
        if np.any(active_users_mask):
            ax.scatter(self.user_positions[active_users_mask, 0], self.user_positions[active_users_mask, 1], s=120,
                       c='red', label='Pending')
        if np.any(collected_users_mask):
            ax.scatter(self.user_positions[collected_users_mask, 0], self.user_positions[collected_users_mask, 1],
                       s=120, facecolors='none', edgecolors='green', linewidth=2, label='Collected')
        for i, pos in enumerate(self.user_positions):
            ax.text(pos[0], pos[1] + 2, f"U{i}", fontsize=9, ha='center')
        if len(self.trajectory) > 1:
            traj = np.array(self.trajectory)
            ax.plot(traj[:, 0], traj[:, 1], 'b-o', markersize=3, alpha=0.6, label='Trajectory')
        ax.scatter(self.uav_position[0], self.uav_position[1], s=250, c='blue', marker='*', label='UAV')

        action_text = f"Episode: {episode}, Step: {step}\nStudent Model's Decided Action:\n"
        action_text += json.dumps(student_action, indent=2)
        plt.figtext(0.5, 0.01, action_text, ha="center", fontsize=10,
                    bbox={"facecolor": "lightgray", "alpha": 0.5, "pad": 5})

        ax.set_title(f"StarPO-S Decision Process (Episode {episode}, Step {step})")
        ax.set_xlim(-5, AREA_SIZE + 5)
        ax.set_ylim(-5, AREA_SIZE + 5)
        ax.grid(True)
        ax.legend(loc="upper right")
        plt.tight_layout(rect=[0, 0.15, 1, 1])
        plt.savefig(f"starpo_results_en/ep{episode}_step{step}.png")
        plt.close()


# ===== Core Fix 1: Simplify the teacher model so it outputs only the action =====
class TeacherLLM:
    """Rule-based expert policy used to label the SFT training data.

    It reads the textual environment state and always heads for the
    nearest user with a pending task; once nothing is pending it returns
    to the base station. Decisions are emitted as JSON action strings.
    """

    def generate_decision(self, state_description):
        """Return the expert action for this state as a JSON string."""
        pending = self._parse_users(state_description)
        if not pending:
            # Nothing left to collect: head back to the base station.
            action = {"target_type": "position", "target_coords": BASE_STATION_POS.tolist(), "speed": "medium"}
        else:
            # Greedy: fly at high speed toward the closest pending user.
            nearest_id = min(pending.keys(), key=lambda uid: pending[uid]['dist'])
            action = {"target_type": "user", "target_id": nearest_id, "speed": "high"}
        return json.dumps(action)

    def _parse_users(self, desc):
        """Extract {user_id: {"dist": float}} from the state text.

        Returns an empty dict when the pending-task section is missing or
        no user line can be parsed.
        """
        sections = desc.split("Users with pending tasks:\n")
        if len(sections) < 2:
            return {}
        users = {}
        for line in sections[1].split('\n'):
            if "- User" not in line:
                continue
            try:
                user_id = int(line.split('User ')[1].split(':')[0].strip())
                dist = float(line.split('distance ')[1].split('m')[0].strip())
            except (IndexError, ValueError):
                continue  # skip malformed lines, keep scanning
            users[user_id] = {"dist": dist}
        return users


class StudentLLM(nn.Module):
    """Character-level encoder-decoder Transformer.

    Maps a textual state description (tokens are raw code points) to a
    JSON action string via greedy decoding between '{' and '}'.
    """

    def __init__(self, vocab_size, embed_dim, num_heads, num_layers):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_dim)
        # Learned positional encoding shared by encoder and decoder
        # (supports sequences up to 1024 tokens).
        self.pos_encoder = nn.Parameter(torch.zeros(1, 1024, embed_dim))
        self.transformer = nn.Transformer(
            d_model=embed_dim,
            nhead=num_heads,
            num_encoder_layers=num_layers,
            num_decoder_layers=num_layers,
            dim_feedforward=embed_dim * 4,
            batch_first=True,
            dropout=0.1,
        )
        self.fc_out = nn.Linear(embed_dim, vocab_size)

    def _embed_with_pos(self, tokens):
        """Token embeddings plus the matching positional-encoding slice."""
        return self.embed(tokens) + self.pos_encoder[:, :tokens.shape[1], :]

    def forward(self, src, tgt):
        """Teacher-forced pass; returns (batch, tgt_len, vocab) logits."""
        causal_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(1)).to(device)
        hidden = self.transformer(self._embed_with_pos(src), self._embed_with_pos(tgt), tgt_mask=causal_mask)
        return self.fc_out(hidden)

    def generate(self, src, max_len=150):
        """Greedily decode a string, starting at '{' and stopping at '}'
        or after max_len tokens, whichever comes first."""
        self.eval()
        with torch.no_grad():
            memory = self.transformer.encoder(self._embed_with_pos(src))
            seq = torch.tensor([[ord('{')]], dtype=torch.long, device=device)
            while seq.size(1) < max_len:
                decoded = self.transformer.decoder(self._embed_with_pos(seq), memory)
                next_token = self.fc_out(decoded[:, -1, :]).argmax(dim=-1, keepdim=True)
                seq = torch.cat([seq, next_token], dim=1)
                if next_token.item() == ord('}'):
                    break
            return "".join(chr(c) for c in seq.cpu().numpy().flatten())


def text_to_tensor(text):
    """Encode `text` as a 1-D LongTensor of code points on `device`,
    silently dropping characters outside the model vocabulary."""
    codes = [ord(ch) for ch in text if ord(ch) < VOCAB_SIZE]
    return torch.tensor(codes, dtype=torch.long, device=device)


# ===== Core Fix 2: Simplify the parsing logic =====
def parse_llm_output(output_str):
    """Extract the first-'{'-to-last-'}' span of `output_str` and parse it
    as the action dict.

    Args:
        output_str: raw text produced by the teacher or student model.

    Returns:
        The parsed action dict, or None on any failure (callers substitute
        a default hover action).
    """
    start = output_str.find('{')
    end = output_str.rfind('}') + 1
    if start == -1 or end == 0:
        # BUG FIX: the original fell through and returned None *silently*
        # here; log this failure just like the decode-error path below.
        print(f"!! Failed to parse LLM output: no JSON object found\nOutput: '{output_str}'")
        return None
    try:
        return json.loads(output_str[start:end])
    except json.JSONDecodeError as e:  # KeyError could never occur here
        print(f"!! Failed to parse LLM output: {e}\nOutput: '{output_str}'")
        return None


def generate_sft_data():
    """Phase 1: roll out the rule-based teacher and dump SFT examples.

    Each of SFT_DATASET_SIZE episodes contributes up to 5 decision steps,
    so SFT_DATA_FILE ends up holding roughly SFT_DATASET_SIZE * 5 pairs of
    {"input": state_description, "output": teacher_action_json}.
    Skips generation entirely when the file already exists.
    """
    if os.path.exists(SFT_DATA_FILE):
        print(f"Found existing data file '{SFT_DATA_FILE}', skipping generation.")
        return
    print("--- Phase 1: Start generating high-quality SFT data ---")
    teacher = TeacherLLM()
    env = UAVEnv()
    dataset = []
    start_time = time.time()
    # Data generation mirrors deployment: the teacher's JSON decision is
    # both stored as the training target and executed in the environment.
    for i in range(SFT_DATASET_SIZE):
        state_desc = env.reset()
        done = False
        for _ in range(5):  # cap each episode's contribution at 5 steps
            if done:
                break
            teacher_action_json = teacher.generate_decision(state_desc)
            dataset.append({"input": state_desc, "output": teacher_action_json})
            action = parse_llm_output(teacher_action_json)
            state_desc, _, done, _ = env.step(action)
        if (i + 1) % 100 == 0:
            print(f"Generated data points {len(dataset)}/{SFT_DATASET_SIZE * 5} (approx)...")
    # BUG FIX: write the file *before* announcing it was saved (the original
    # printed "Saved to ..." first). Also dropped an unused snapshot of the
    # UAV position that the original captured per episode.
    with open(SFT_DATA_FILE, 'w', encoding='utf-8') as f:
        json.dump(dataset, f, indent=2)
    end_time = time.time()
    print(f"--- Data generation complete. Took {end_time - start_time:.2f} seconds. Saved to '{SFT_DATA_FILE}' ---")


def train_student_model():
    """Phase 2: supervised fine-tuning of the student Transformer.

    Trains on the teacher-generated (state, action) text pairs from
    SFT_DATA_FILE using teacher forcing and cross-entropy (padding token 0
    is ignored), then saves the weights to STUDENT_MODEL_FILE.
    """
    if not os.path.exists(SFT_DATA_FILE):
        print(f"Error: SFT data file '{SFT_DATA_FILE}' not found.")
        return
    print("\n--- Phase 2: Start training the student model ---")
    with open(SFT_DATA_FILE, 'r', encoding='utf-8') as f:
        dataset = json.load(f)
    student_model = StudentLLM(VOCAB_SIZE, EMBED_DIM, NUM_HEADS, NUM_LAYERS).to(device)
    optimizer = optim.Adam(student_model.parameters(), lr=LEARNING_RATE, weight_decay=1e-4)
    # Padding value 0 (NUL never appears in real text) is masked out of the loss.
    criterion = nn.CrossEntropyLoss(ignore_index=0)
    student_model.train()
    for epoch in range(EPOCHS):
        total_loss, batch_count = 0, 0
        random.shuffle(dataset)
        for batch_start in range(0, len(dataset), BATCH_SIZE):
            batch = dataset[batch_start:batch_start + BATCH_SIZE]
            if not batch: continue
            optimizer.zero_grad()
            src_batch = [text_to_tensor(item['input']) for item in batch]
            tgt_batch = [text_to_tensor(item['output']) for item in batch]
            # Drop degenerate pairs: an empty source, or a target too short
            # to split into (input, shifted-output) halves.
            # FIX: the comprehension index previously shadowed the outer
            # batch loop variable `i`; both renamed for clarity.
            valid_indices = [j for j, (s, t) in enumerate(zip(src_batch, tgt_batch))
                             if s.numel() > 0 and t.numel() > 1]
            if not valid_indices: continue
            src_batch = [src_batch[j] for j in valid_indices]
            tgt_batch = [tgt_batch[j] for j in valid_indices]
            src_padded = nn.utils.rnn.pad_sequence(src_batch, batch_first=True, padding_value=0).to(device)
            tgt_padded = nn.utils.rnn.pad_sequence(tgt_batch, batch_first=True, padding_value=0).to(device)
            # Teacher forcing: predict token t+1 from tokens up to t.
            tgt_input = tgt_padded[:, :-1]
            tgt_output = tgt_padded[:, 1:]
            pred = student_model(src_padded, tgt_input)
            loss = criterion(pred.reshape(-1, VOCAB_SIZE), tgt_output.reshape(-1))
            loss.backward()
            torch.nn.utils.clip_grad_norm_(student_model.parameters(), 1.0)
            optimizer.step()
            total_loss += loss.item()
            batch_count += 1
        avg_loss = total_loss / batch_count if batch_count > 0 else 0
        print(f"Epoch {epoch + 1}/{EPOCHS}, Average Loss: {avg_loss:.4f}")
    torch.save(student_model.state_dict(), STUDENT_MODEL_FILE)
    print(f"--- Training complete. Model saved to '{STUDENT_MODEL_FILE}' ---")


def evaluate_student(num_episodes=5):
    """Phase 3: run the trained student policy in the environment.

    Prints each step's decision, renders the first two episodes to PNG,
    and reports per-episode plus average task completion rates.
    """
    if not os.path.exists(STUDENT_MODEL_FILE):
        print(f"Error: Student model file '{STUDENT_MODEL_FILE}' not found.")
        return
    print("\n--- Phase 3: Start evaluating the student model ---")
    student_model = StudentLLM(VOCAB_SIZE, EMBED_DIM, NUM_HEADS, NUM_LAYERS).to(device)
    student_model.load_state_dict(torch.load(STUDENT_MODEL_FILE, map_location=device))
    env = UAVEnv()
    all_completion_rates = []

    for ep in range(num_episodes):
        state_desc = env.reset()
        done, step = False, 0
        while not done:
            action = None
            src_tensor = text_to_tensor(state_desc).unsqueeze(0)
            if src_tensor.numel() == 0:
                print("Warning: State description resulted in an empty tensor. Executing default action.")
            else:
                # Decode an action string from the model and parse it.
                action = parse_llm_output(student_model.generate(src_tensor))

            print(f"\n[Episode {ep + 1}, Step {step + 1}]")
            if action is None:
                # Generation/parsing failed: hover in place at low speed.
                action = {"target_type": "position", "target_coords": env.uav_position.tolist(), "speed": "low"}
                print("UAV Action: Parsing failed, executing default hover-in-place action.")
            else:
                print(f"UAV Action: {action}")

            if ep < 2:  # only visualize the first two episodes
                env.render(action, ep + 1, step + 1)
            state_desc, reward, done, info = env.step(action)
            step += 1

        completion_rate = info['completion_rate']
        all_completion_rates.append(completion_rate)
        print(f"\n>>>> Episode {ep + 1} finished. Task Completion Rate: {completion_rate:.2%} <<<<")
        time.sleep(1)

    avg_completion = np.mean(all_completion_rates)
    print("\n--- Evaluation Summary ---")
    print(f"Average task completion rate over {num_episodes} episodes: {avg_completion:.2%}")


if __name__ == "__main__":
    # Force a fresh run: remove any previous dataset/model so all three
    # phases (data generation, training, evaluation) execute from scratch
    # rather than being skipped by the existence checks inside them.
    if os.path.exists(SFT_DATA_FILE): os.remove(SFT_DATA_FILE)
    if os.path.exists(STUDENT_MODEL_FILE): os.remove(STUDENT_MODEL_FILE)
    generate_sft_data()
    train_student_model()
    evaluate_student(num_episodes=5)
