﻿# -*- coding: utf-8 -*-
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from scipy.special import softmax
from NetworkEnvironment import NetworkEnvironment

plt.rcParams['font.sans-serif'] = ['SimHei']  # Use SimHei so Chinese plot labels render correctly (Windows font)

class QoSRewardCalculator:
    """Computes a per-hop QoS reward from link delay, available bandwidth and loss rate."""

    @staticmethod
    def calculate_metrics(src, dst, env):
        """Return a scalar reward for forwarding along the link src -> dst.

        The reward combines:
          * a delay term: the link's delay relative to the mean delay of
            src's outgoing links, squashed into (-1, 1) via arctan;
          * a bandwidth term: available/total bandwidth rescaled from
            [0, 1] onto [-1, 1];
          * the link's loss rate as a straight penalty.

        Fix: the original also computed an average neighbor bandwidth
        (avg_bw) that was never used; that dead computation is removed.
        """
        attrs = env.get_link_attrs(src, dst)
        neighbors = env.get_neighbors(src)

        # Mean delay over src's outgoing links (subnet-local baseline).
        avg_delay = np.mean([env.get_link_attrs(src, n)['delay'] for n in neighbors])

        # Delay term, squashed into (-1, 1).
        # NOTE(review): this is positive when the link is *slower* than
        # average, yet it is added with weight +0.5 below even though the
        # original comment says "smaller is better" — confirm the sign.
        delay_metric = (2 / np.pi) * np.arctan(attrs['delay'] - avg_delay)

        # Bandwidth term on [-1, 1].
        # NOTE(review): assumes total_bw != 0 — confirm upstream invariant.
        bw_ratio = attrs['available_bw'] / attrs['total_bw']
        bw_metric = (2 * bw_ratio) - 1

        # Weighted combination (weights are tunable).
        return -0.2 + 0.5 * delay_metric + 0.3 * bw_metric - 0.5 * attrs['loss_rate']

class QARAgent:
    """Tabular Q-routing agent: learns per-destination next-hop choices via Q-learning."""

    def __init__(self, env):
        self.env = env
        self.q_table = {}        # state key ("current->target") -> {neighbor: Q-value}
        self.hop_counts = []     # hops taken in each training episode
        self.training_log = []   # per-episode records: episode index, hops, path

    def get_state_key(self, current, target):
        """Build the Q-table key for being at *current* while heading to *target*."""
        return f"{current}->{target}"

    def select_next_hop(self, current, target, temperature=1.0):
        """Sample a next hop via softmax over Q-values; None if *current* has no neighbors.

        *temperature* controls exploration: high values give near-uniform
        sampling, low values give near-greedy selection.
        """
        state_key = self.get_state_key(current, target)
        neighbors = self.env.get_neighbors(current)

        if not neighbors:
            return None

        # Lazily initialise Q-values the first time this state is visited.
        if state_key not in self.q_table:
            self.q_table[state_key] = {n: 0.0 for n in neighbors}

        # Boltzmann (softmax) exploration over the neighbors' Q-values.
        q_values = [self.q_table[state_key][n] for n in neighbors]
        probabilities = softmax(np.array(q_values) / temperature)
        return np.random.choice(neighbors, p=probabilities)

    def update_q_table(self, src, target, next_hop, reward, alpha=0.1, gamma=0.9):
        """One-step Q-learning update for taking *next_hop* from *src* toward *target*.

        Q(s,a) += alpha * (reward + gamma * max_a' Q(s',a') - Q(s,a));
        an unseen next state contributes a max of 0.
        """
        state_key = self.get_state_key(src, target)
        next_state_key = self.get_state_key(next_hop, target)

        max_next_q = max(self.q_table.get(next_state_key, {}).values(), default=0)
        current_q = self.q_table[state_key][next_hop]
        self.q_table[state_key][next_hop] += alpha * (reward + gamma * max_next_q - current_q)

    def train(self, episodes=500):
        """Run *episodes* rollouts from env.source to env.destination, learning online."""
        for episode in range(episodes):
            current = self.env.source
            target = self.env.destination
            hops = 0
            path = [current]

            while current != target and hops < 50:  # hop cap guards against infinite loops
                next_hop = self.select_next_hop(current, target)
                if next_hop is None:  # dead end: node has no neighbors
                    break

                reward = QoSRewardCalculator.calculate_metrics(current, next_hop, self.env)
                self.update_q_table(current, target, next_hop, reward)

                current = next_hop
                hops += 1
                path.append(current)

            self.hop_counts.append(hops)
            self.training_log.append({
                "episode": episode,
                "hops": hops,
                "path": path
            })

            # Progress report every 50 episodes. map(str, ...) guards against
            # non-string node ids (np.random.choice can return numpy scalars,
            # which str.join would reject).
            if episode % 50 == 0:
                print(f"Episode {episode}: 跳数={hops} 路径={'→'.join(map(str, path))}")

    def visualize_training(self):
        """Plot per-episode hop counts with a moving average; saves qar_training.png."""
        plt.figure(figsize=(12, 6))

        # Raw per-episode hop counts (noisy, drawn faint).
        plt.plot(self.hop_counts, 'b-', alpha=0.3, label='每轮跳数')

        # Moving-average overlay to expose the convergence trend.
        window_size = 50
        moving_avg = np.convolve(self.hop_counts, np.ones(window_size)/window_size, mode='valid')
        plt.plot(range(window_size-1, len(self.hop_counts)), moving_avg,
                'r-', linewidth=2, label=f'{window_size}轮移动平均')

        plt.title("西藏到北京路由优化过程")
        plt.xlabel("训练轮次")
        plt.ylabel("路径跳数")
        plt.legend()
        plt.grid(True)
        plt.savefig('qar_training.png')
        plt.show()

    def show_optimal_path(self):
        """Greedily follow the highest-Q next hop from source to destination and print the path.

        Fix: the original looped forever when the greedy policy contained a
        cycle (possible before convergence) and crashed on an empty Q entry;
        both cases now terminate the walk cleanly.
        """
        current = self.env.source
        target = self.env.destination
        path = [current]
        visited = {current}  # cycle guard

        while current != target:
            state_key = self.get_state_key(current, target)
            if state_key not in self.q_table or not self.q_table[state_key]:
                break

            next_hop = max(self.q_table[state_key], key=self.q_table[state_key].get)
            if next_hop in visited:  # revisiting a node would loop forever
                break
            path.append(next_hop)
            visited.add(next_hop)
            current = next_hop

        print("\n最优传输路径：")
        print(" → ".join(map(str, path)))
        print(f"总跳数: {len(path)-1}")

class EnhancedQARAgent(QARAgent):
    """QARAgent variant with annealed exploration/learning rates and convergence-based early stop."""

    def __init__(self, env):
        super().__init__(env)
        self.convergence_log = []  # (episode, hop-count std-dev) samples, one per 50 episodes

    def train(self, episodes=2000):
        """Train with decaying softmax temperature and learning rate; stop early once hop counts stabilise."""
        for episode in range(episodes):
            # Anneal exploration linearly (floor 0.1) and the learning rate geometrically.
            temperature = max(0.1, 1.0 - episode/1000)
            alpha = 0.1 * (0.99 ** episode)

            # One rollout, capped at 30 hops.
            current, hops = self.env.source, 0
            while current != self.env.destination and hops < 30:
                next_hop = self.select_next_hop(current, self.env.destination, temperature)
                if next_hop is None:
                    break

                reward = self._calculate_enhanced_reward(current, next_hop)
                self.update_q_table(
                    src=current,
                    target=self.env.destination,
                    next_hop=next_hop,
                    reward=reward,
                    alpha=alpha,   # dynamic learning rate
                    gamma=0.95     # discount factor
                )

                current, hops = next_hop, hops+1

            # Record convergence statistics for this episode.
            self._monitor_convergence(episode, hops)

            # Early-stop condition.
            if self._check_convergence():
                break

    def _calculate_enhanced_reward(self, src, dst):
        """Per-hop reward for the link src -> dst.

        BUG FIX: the original body was an unimplemented stub that implicitly
        returned None, so the first update_q_table call raised TypeError.
        Delegate to the shared QoS reward used by the base agent.
        """
        return QoSRewardCalculator.calculate_metrics(src, dst, self.env)

    def _monitor_convergence(self, episode, hops):
        """Append *hops*; every 50 episodes, log the std-dev of recent hop counts."""
        self.hop_counts.append(hops)
        if episode % 50 == 0:
            recent = self.hop_counts[-50:] if len(self.hop_counts) >= 50 else self.hop_counts
            std_dev = np.std(recent)
            self.convergence_log.append((episode, std_dev))
            print(f"轮次 {episode} | 跳数标准差: {std_dev:.2f}")

    def _check_convergence(self, threshold=0.1):
        """Return True once >= 100 episodes exist and their recent hop-count std-dev is below *threshold*."""
        if len(self.hop_counts) < 100:
            return False
        return np.std(self.hop_counts[-100:]) < threshold



def main():
    """Build the network, train the enhanced QAR agent, and report results."""
    # Initialise the network environment (25 nodes, 53 links).
    env = NetworkEnvironment(node_count=25, link_count=53)

    # Train the QAR agent.
    agent = EnhancedQARAgent(env)
    agent.train(episodes=1000)

    # Show the results.
    agent.visualize_training()
    agent.show_optimal_path()


# Guard the entry point so importing this module does not trigger a full
# training run and plotting side effects.
if __name__ == "__main__":
    main()