﻿import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from scipy.special import softmax
import random
import math
from NetworkEnvironment import NetworkEnvironment,draw_network

plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font so the Chinese plot labels/titles below render correctly (Windows font; on other OSes substitute an installed CJK font)



class QoSRewardCalculator:
    """Computes the per-hop QoS reward used by the Q-learning routing agent."""

    @staticmethod
    def calculate_metrics(src, dst, env):
        """Return the scalar reward for traversing the src->dst link.

        Args:
            src: current node (must have at least one neighbor — callers only
                invoke this after a next hop has been selected).
            dst: chosen next-hop node (a neighbor of src).
            env: network environment exposing get_link_attrs(a, b) -> dict with
                keys 'delay', 'available_bw', 'total_bw', 'loss_rate', and
                get_neighbors(node) -> list of neighbors.

        Returns:
            float reward combining a fixed step cost, a delay penalty relative
            to src's average outgoing delay, a bandwidth-availability bonus,
            and a loss-rate penalty.
        """
        attrs = env.get_link_attrs(src, dst)
        neighbors = env.get_neighbors(src)

        # Average delay over all links leaving src; baseline for this link.
        avg_delay = np.mean([env.get_link_attrs(src, n)['delay'] for n in neighbors])

        # Delay metric in (-1, 1): positive when this link is slower than the
        # local average, negative when faster (arctan squashes the difference).
        delay_metric = (2 / np.pi) * np.arctan(attrs['delay'] - avg_delay)

        # Bandwidth metric in [-1, 1]: +1 when fully free, -1 when saturated.
        # NOTE(review): assumes total_bw > 0 — confirm NetworkEnvironment
        # never creates zero-capacity links.
        bw_ratio = attrs['available_bw'] / attrs['total_bw']
        bw_metric = (2 * bw_ratio) - 1

        # Weighted reward (weights tunable): fixed per-hop cost, delay penalty
        # (delay_metric is SUBTRACTED — lower-than-average delay must increase
        # the reward; the original added it, rewarding slow links), bandwidth
        # bonus, and loss-rate penalty.
        reward = -0.2 - 0.5 * delay_metric + 0.3 * bw_metric - 0.5 * attrs['loss_rate']
        return reward

class QARAgent:
    """Q-learning routing agent that learns QoS-aware paths from env.source
    to env.destination using softmax (Boltzmann) exploration."""

    def __init__(self, env):
        self.env = env
        # Q-table keyed by "current->target" state strings; each value maps
        # a neighbor node to its Q-value. States are initialized lazily.
        self.q_table = {}
        self.hop_counts = []    # hops taken in each training episode
        self.training_log = []  # per-episode dicts: {"episode", "hops", "path"}

    def get_state_key(self, current, target):
        """Encode the (current node, destination) pair as a Q-table key."""
        return f"{current}->{target}"

    def select_next_hop(self, current, target, temperature=1.0):
        """Pick the next hop by softmax sampling over the state's Q-values.

        Lower temperature makes the choice greedier. Returns None when the
        current node has no neighbors (dead end).
        """
        state_key = self.get_state_key(current, target)
        neighbors = self.env.get_neighbors(current)

        if not neighbors:
            return None

        # Lazily initialize Q-values the first time a state is visited.
        if state_key not in self.q_table:
            self.q_table[state_key] = {n: 0.0 for n in neighbors}

        q_values = [self.q_table[state_key][n] for n in neighbors]
        probabilities = softmax(np.array(q_values) / temperature)
        return np.random.choice(neighbors, p=probabilities)

    def update_q_table(self, src, target, next_hop, reward, alpha=0.1, gamma=0.9):
        """Standard Q-learning update: Q += alpha * (r + gamma * max Q' - Q)."""
        state_key = self.get_state_key(src, target)
        next_state_key = self.get_state_key(next_hop, target)

        # Unvisited next states contribute 0 future value.
        max_next_q = max(self.q_table.get(next_state_key, {}).values(), default=0)
        current_q = self.q_table[state_key][next_hop]
        self.q_table[state_key][next_hop] += alpha * (reward + gamma * max_next_q - current_q)

    def train(self, episodes=500):
        """Run training episodes, recording hop counts and paths taken."""
        for episode in range(episodes):
            current = self.env.source
            target = self.env.destination
            hops = 0
            path = [current]

            while current != target and hops < 50:  # hop cap prevents endless wandering
                next_hop = self.select_next_hop(current, target)
                if next_hop is None:  # dead end: no neighbors to move to
                    break

                reward = QoSRewardCalculator.calculate_metrics(current, next_hop, self.env)
                self.update_q_table(current, target, next_hop, reward)

                current = next_hop
                hops += 1
                path.append(current)

            self.hop_counts.append(hops)
            self.training_log.append({
                "episode": episode,
                "hops": hops,
                "path": path
            })

            # Log progress every 500 episodes (original comment said 50, but
            # the code tests % 500 — comment fixed to match the code).
            # map(str, ...) so non-string node ids (e.g. ints) don't crash join.
            if episode % 500 == 0:
                print(f"Episode {episode}: 跳数={hops} 路径={'→'.join(map(str, path))}")

    def visualize_training(self):
        """Plot per-episode hop counts with a moving average; saves qar_training.png."""
        plt.figure(figsize=(12, 6))

        # Raw per-episode hop counts (faint, to show the noise).
        plt.plot(self.hop_counts, 'b-', alpha=0.3, label='每轮跳数')

        # Moving-average overlay; empty (nothing drawn) if fewer than
        # window_size episodes were trained.
        window_size = 50
        moving_avg = np.convolve(self.hop_counts, np.ones(window_size)/window_size, mode='valid')
        plt.plot(range(window_size-1, len(self.hop_counts)), moving_avg,
                'r-', linewidth=2, label=f'{window_size}轮移动平均')

        plt.title("西藏到北京路由优化过程")
        plt.xlabel("训练轮次")
        plt.ylabel("路径跳数")
        plt.legend()
        plt.grid(True)
        plt.savefig('qar_training.png')
        plt.show()

    def show_optimal_path(self):
        """Greedily follow argmax-Q next hops from source and print the path.

        A visited-set guard stops the walk if the greedy policy cycles (the
        original could loop forever on a half-trained Q-table).
        """
        current = self.env.source
        target = self.env.destination
        path = [current]
        visited = {current}

        while current != target:
            state_key = self.get_state_key(current, target)
            if state_key not in self.q_table:
                break

            next_hop = max(self.q_table[state_key], key=self.q_table[state_key].get)
            if next_hop in visited:  # greedy policy looped — bail out
                break
            path.append(next_hop)
            visited.add(next_hop)
            current = next_hop

        print("\n最优传输路径：")
        print(" → ".join(map(str, path)))
        print(f"总跳数: {len(path)-1}")

# Build the network environment (25 nodes, 90 links) and render its topology.
env = NetworkEnvironment(node_count=25, link_count=90)
draw_network(env.graph)
# Train the QAR agent (note: 5000 episodes here overrides train()'s default of 500).
agent = QARAgent(env)
agent.train(episodes=5000)

# Show results: training curve (also saved to qar_training.png) and the greedy best path.
agent.visualize_training()
agent.show_optimal_path()
