import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import matplotlib.pyplot as plt

# ====================== 工具函数 ======================
def generate_random_cities(n, batch_size=1, device='cpu'):
    """生成随机城市坐标 (batch_size, n, 2)"""
    return torch.rand(batch_size, n, 2, device=device)

def euclidean_distance(city1, city2):
    """计算欧氏距离 (支持批量计算)"""
    return torch.norm(city1 - city2, dim=-1)

def visualize_route(cities, path, reward=None, save_path=None):
    """
    可视化TSP路径
    
    参数:
    cities: torch.Tensor, 形状为(n, 2)，表示城市坐标
    path: list of int, 访问顺序
    reward: float, 可选，路径的奖励值（负的总距离）
    save_path: str, 可选，图片保存路径
    """
    # 转换为numpy数组
    cities = cities.cpu().numpy()
    
    # 创建图形
    plt.figure(figsize=(10, 10))
    
    # 绘制城市点
    plt.scatter(cities[:, 0], cities[:, 1], c='red', s=100)
    
    # 给每个城市标号
    for i in range(len(cities)):
        plt.annotate(f'City {i}', (cities[i, 0], cities[i, 1]), 
                    xytext=(5, 5), textcoords='offset points')
    
    # 绘制路径
    path = path + [path[0]]  # 添加返回起点的路径
    for i in range(len(path)-1):
        start = cities[path[i]]
        end = cities[path[i+1]]
        plt.plot([start[0], end[0]], [start[1], end[1]], 'b-', alpha=0.5)
    
    # 添加标题
    if reward is not None:
        plt.title(f'TSP Route (Total Distance: {-reward:.2f})')
    else:
        plt.title('TSP Route')
    
    # 设置坐标轴范围
    plt.xlim(-0.1, 1.1)
    plt.ylim(-0.1, 1.1)
    
    # 保存或显示
    if save_path:
        plt.savefig(save_path)
    plt.show()
    plt.close()

# ====================== 模型定义 ======================
class Encoder(nn.Module):
    def __init__(self, input_dim, embed_dim, num_layers):
        super().__init__()
        # 将输入维度映射到嵌入维度的线性层
        self.embed = nn.Linear(input_dim, embed_dim)
        # 创建多层Transformer编码器
        self.layers = nn.ModuleList([
            nn.TransformerEncoderLayer(
                d_model=embed_dim,    # 模型的维度
                nhead=8,             # 多头注意力中的头数
                dim_feedforward=512, # 前馈网络的隐藏层维度
                dropout=0.1          # dropout比率，用于防止过拟合
            ) for _ in range(num_layers)]
        )
    
    def forward(self, x):
        # x: (batch_size, n_cities, input_dim)
        x = self.embed(x)  # (batch_size, n_cities, embed_dim)
        x = x.permute(1, 0, 2)  # (n_cities, batch_size, embed_dim) for Transformer
        for layer in self.layers:
            x = layer(x)
        return x.permute(1, 0, 2)  # 恢复为 (batch_size, n_cities, embed_dim)

class Decoder(nn.Module):
    def __init__(self, embed_dim):
        super().__init__()
        self.embed_dim = embed_dim
        # 动态生成query的LSTM
        self.lstm = nn.LSTMCell(embed_dim, embed_dim)
        # 注意力计算参数
        self.W_q = nn.Linear(embed_dim, embed_dim)
        self.W_k = nn.Linear(embed_dim, embed_dim)
        self.v = nn.Linear(embed_dim, 1)
    
    def forward(self, enc_out, hidden, mask):
        """
        enc_out: (batch_size, n_cities, embed_dim)
        hidden: tuple (h, c) 各为(batch_size, embed_dim)
        mask: (batch_size, n_cities)
        """
        # 更新LSTM状态
        h, c = self.lstm(hidden[0], hidden)
        
        # 计算注意力分数
        query = self.W_q(h.unsqueeze(1))  # (batch_size, 1, embed_dim)
        keys = self.W_k(enc_out)          # (batch_size, n_cities, embed_dim)
        
        # 加性注意力
        scores = self.v(torch.tanh(query + keys)).squeeze(-1)  # (batch_size, n_cities)
        scores = scores.masked_fill(mask, float('-inf'))        # 屏蔽已访问城市
        
        probs = F.softmax(scores, dim=-1)
        return probs, (h, c)

class Critic(nn.Module):
    """价值网络用于计算基线"""
    def __init__(self, embed_dim):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(embed_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 1)
        )
    
    def forward(self, enc_out):
        # enc_out: (batch_size, n_cities, embed_dim)
        pooled = enc_out.mean(dim=1)  # (batch_size, embed_dim)
        return self.fc(pooled).squeeze(-1)  # (batch_size)

class TSPModel(nn.Module):
    def __init__(self, input_dim=2, embed_dim=128, num_layers=3):
        super().__init__()
        self.encoder = Encoder(input_dim, embed_dim, num_layers)
        self.decoder = Decoder(embed_dim)
        self.critic = Critic(embed_dim)
        
    def init_hidden(self, batch_size, device):
        """初始化LSTM隐藏状态"""
        h = torch.zeros(batch_size, self.decoder.embed_dim, device=device)
        c = torch.zeros(batch_size, self.decoder.embed_dim, device=device)
        return (h, c)
    
    def forward(self, cities, device='cpu'):
        """
        cities: (batch_size, n, 2)
        返回: 
        - log_probs: 各步动作的对数概率 (batch_size, n)
        - rewards: 路径总距离的负数 (batch_size)
        - baseline: Critic预测的基线值 (batch_size)
        """
        batch_size, n, _ = cities.shape
        enc_out = self.encoder(cities)  # (batch_size, n, embed_dim)
        baseline = self.critic(enc_out)  # (batch_size)
        
        hidden = self.init_hidden(batch_size, device)
        log_probs = []
        total_distances = torch.zeros(batch_size, device=device)
        
        # 选择起点（这里固定为第一个城市）
        current_pos = cities[:, 0]  # (batch_size, 2)
        path = [torch.zeros(batch_size, dtype=torch.long, device=device)]
        
        # 创建访问记录张量
        visited = torch.zeros(batch_size, n, dtype=torch.bool, device=device)
        visited[:, 0] = True  # 标记起始城市为已访问
        
        for step in range(n-1):
            # 获取当前城市嵌入
            current_emb = enc_out[torch.arange(batch_size), path[-1]]
            
            # 计算动作概率 - 使用visited的副本作为mask
            probs, hidden = self.decoder(
                enc_out, 
                hidden, 
                visited.clone()  # 使用clone()创建副本
            )
            
            # 采样动作
            actions = torch.multinomial(probs, 1).squeeze(-1)  # (batch_size)
            log_prob = torch.log(probs.gather(1, actions.unsqueeze(-1))).squeeze(1)
            log_probs.append(log_prob)
            
            # 计算距离
            next_pos = cities[torch.arange(batch_size), actions]
            dist = euclidean_distance(current_pos, next_pos)
            total_distances += dist
            
            # 更新状态
            current_pos = next_pos
            path.append(actions)
            visited[torch.arange(batch_size), actions] = True
        
        # 返回起点的距离
        dist_to_start = euclidean_distance(current_pos, cities[:, 0])
        total_distances += dist_to_start
        
        # 转换为奖励（负距离）
        rewards = -total_distances
        
        # 合并log_probs (batch_size, n-1)
        log_probs = torch.stack(log_probs, dim=1)
        
        return log_probs, rewards, baseline

# ====================== 训练循环 ======================
def train(model, device, num_episodes=10000, batch_size=32, n_cities=20, visualize_every=1000):
    """
    训练函数
    
    参数:
    visualize_every: int, 每隔多少轮显示一次可视化结果
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    moving_avg = 0.0
    alpha = 0.05
    
    for episode in range(num_episodes):
        # 生成数据
        cities = generate_random_cities(n_cities, batch_size, device)
        
        # 前向计算
        log_probs, rewards, baseline = model(cities, device)
        
        # 计算移动平均基线
        with torch.no_grad():
            if episode == 0:
                moving_avg = rewards.float().mean().item()
            else:
                moving_avg = (1 - alpha)*moving_avg + alpha*rewards.float().mean().item()
        
        # 计算损失
        advantage = (rewards - baseline).detach()  # 分离梯度
        policy_loss = -(log_probs.sum(dim=1) * advantage).mean()
        
        # Critic的MSE损失
        value_loss = F.mse_loss(baseline, rewards.detach())
        
        # 总损失
        total_loss = policy_loss + value_loss
        
        # 反向传播
        optimizer.zero_grad()
        total_loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=2.0)
        optimizer.step()
        
        # 可视化
        if episode % visualize_every == 0:
            # 使用批次中的第一个样本进行可视化
            with torch.no_grad():
                # 重新运行模型获取路径
                model.eval()
                _, rewards_eval, _ = model(cities[0:1], device)
                path = []
                visited = torch.zeros(1, n_cities, dtype=torch.bool, device=device)
                current = torch.zeros(1, dtype=torch.long, device=device)
                
                for _ in range(n_cities-1):
                    path.append(current.item())
                    visited[0, current] = True
                    
                    # 获取下一个城市
                    enc_out = model.encoder(cities[0:1])
                    hidden = model.init_hidden(1, device)
                    probs, _ = model.decoder(enc_out, hidden, visited)
                    current = torch.argmax(probs[0])
                
                path.append(current.item())
                
                # 可视化
                visualize_route(
                    cities[0], 
                    path, 
                    rewards_eval.item(),
                    save_path=f'tsp_route_episode_{episode}.png'
                )
                model.train()
        
        # 日志输出
        if episode % 100 == 0:
            print(f"Episode {episode}, Avg Reward: {rewards.mean().item():.2f}, "
                  f"Baseline: {moving_avg:.2f}, Loss: {total_loss.item():.2f}")

if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = TSPModel(embed_dim=128).to(device)
    train(model, device, num_episodes=10000, batch_size=32, n_cities=60, visualize_every=1000)