import matplotlib.pyplot as plt
import pandas as pd
import sys
from optparse import OptionParser
import multiprocessing as mp
import os

import matplotlib as mpl

# Raise Agg's path chunk size so very long line plots are rendered in
# manageable pieces instead of overflowing the backend.
mpl.rcParams['agg.path.chunksize'] = 10000

# Allow more aggressive path simplification to speed up rendering of dense lines.
mpl.rcParams['path.simplify_threshold'] = 1.0

# Per-flow draw order used by plot_rate(); all flows share z-level 0 by default.
zorder = [0] * 24

# Global store of queue-length samples, keyed by (switch_id, dev_id).
# Filled by processing_queue_data() and read by plot_rate().
qlen_data = {}


def processing_queue_data(qlen_filename, switch_ids, dev_ids, queue_mode, st, ed):
    """Load queue-length samples from *qlen_filename* into the global qlen_data.

    Each line of the file is:
        "<switch_id> <timestamp> <dev_id> <qlen> [<dev_id> <qlen> ...]"

    queue_mode:
        0 -- do nothing;
        2 -- accept every (switch, dev) pair, then drop pairs whose queue
             length never rose above zero, and print the pairs that did;
        anything else -- keep only pairs matching switch_ids and dev_ids.

    st/ed bound the accepted timestamp range; -1 disables a bound.
    NOTE(review): lines are assumed to be time-sorted -- reading stops at
    the first timestamp past *ed*.
    """
    auto_valid = {}
    if queue_mode != 0:
        with open(qlen_filename, 'r') as f:
            for line in f:
                parts = line.strip().split()
                if not parts:
                    continue
                switch_id = int(parts[0])
                if switch_id not in switch_ids and queue_mode != 2:
                    continue
                timestamp = int(parts[1])
                if st != -1 and timestamp < st:
                    continue
                if ed != -1 and timestamp > ed:
                    break
                # Remaining fields come in (dev_id, qlen) pairs; stop before
                # a trailing unpaired field on a malformed line instead of
                # raising IndexError.
                for i in range(2, len(parts) - 1, 2):
                    dev_id = int(parts[i])
                    qlen = int(parts[i + 1])
                    if dev_id in dev_ids or queue_mode == 2:
                        entry = qlen_data.setdefault(
                            (switch_id, dev_id), {"timestamp": [], "qlen": []})
                        entry["timestamp"].append(timestamp)
                        entry["qlen"].append(qlen)
                    if queue_mode == 2:
                        key = (switch_id, dev_id)
                        # An interface is "working" once any sample is > 0.
                        auto_valid[key] = auto_valid.get(key, False) or qlen > 0

    if queue_mode == 2:
        # Drop interfaces whose queue never grew; they carry no signal.
        # .get() guards against keys left in the global from an earlier call.
        for key in list(qlen_data.keys()):
            if not auto_valid.get(key, False):
                del qlen_data[key]

        # Report only the interfaces whose queue was ever non-empty.
        working_if = [key for key, seen in auto_valid.items() if seen]
        print(working_if)

def data_clean(qlen_data, threshold=10000):
    """Thin out queue-length series, keeping only significant changes.

    A sample is kept when it is the first of its series, or when its queue
    length differs from the last *kept* sample by more than *threshold*
    (default 10000, matching the original hard-coded value).

    Returns a new dict with the same (switch_id, dev_id) keys, each mapping
    to {"timestamp": [...], "qlen": [...]} of the retained samples. The
    input dict is not modified.
    """
    cleaned_data = {}
    for key, series in qlen_data.items():
        kept_timestamps = []
        kept_qlens = []

        prev_qlen = None
        for ts, qlen in zip(series["timestamp"], series["qlen"]):
            if prev_qlen is None or abs(qlen - prev_qlen) > threshold:
                kept_timestamps.append(ts)
                kept_qlens.append(qlen)
                prev_qlen = qlen

        cleaned_data[key] = {"timestamp": kept_timestamps, "qlen": kept_qlens}

    return cleaned_data

def plot_rate(filename, title="", st=-1, ed=-1, cc=""):
    """Plot per-flow rate (top subplot) and queue length (bottom subplot).

    Reads space-separated rate records from *filename*
    ("src dst sport dport rate timestamp" per line), restricts them to the
    [st, ed] timestamp window (-1 disables a bound), and saves the figure
    as "<filename>.png". Queue-length series come from the global qlen_data
    previously filled by processing_queue_data().
    """
    data = pd.read_csv(filename, sep=" ", header=None,
                       names=["source_node_id", "dest_node_id", "sport",
                              "dport", "rate", "timestamp"])

    # Group samples per flow; a flow is identified by (src, dst, sport).
    flows = {}
    for _, row in data.iterrows():
        if st != -1 and row["timestamp"] < st:
            continue
        # NOTE(review): assumes the file is globally time-sorted; the first
        # timestamp past *ed* stops the scan for all flows.
        if ed != -1 and row["timestamp"] > ed:
            break
        flow_id = (row["source_node_id"], row["dest_node_id"], row["sport"])
        # Create the flow entry only for samples that pass the time filter,
        # so fully-filtered flows do not leave empty legend entries.
        flow = flows.setdefault(flow_id, {"timestamp": [], "rate": []})
        flow["timestamp"].append(row["timestamp"])
        flow["rate"].append(row["rate"])

    # Two stacked subplots: rates on top, queue lengths below.
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(20, 8))
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    color_idx = 0

    # One dashed line per flow in the first subplot.
    for zidx, (flow_id, flow_data) in enumerate(flows.items()):
        ax1.plot(flow_data["timestamp"], flow_data["rate"], linestyle='--',
                 marker='o', markersize=2, label=f"Flow {flow_id}",
                 color=colors[color_idx],
                 # Wrap around so more flows than zorder entries cannot IndexError.
                 zorder=zorder[zidx % len(zorder)])
        color_idx = (color_idx + 1) % len(colors)

    ax1.set_xlabel("Timestamp")
    ax1.set_ylabel("Rate")
    ax1.set_title(f"{title}_rate over Time for {cc}")
    ax1.legend(loc='best')  # let matplotlib pick the least-obstructive spot

    # Queue-length lines in the second subplot; color cycle continues from
    # the flow lines so the two subplots stay visually distinct.
    if len(qlen_data) > 0:
        for (switch_id, dev_id), qlen in qlen_data.items():
            ax2.plot(qlen["timestamp"], qlen["qlen"], linestyle='--',
                     label=f"Qlen Switch {switch_id} Dev {dev_id}",
                     color=colors[color_idx])
            color_idx = (color_idx + 1) % len(colors)

        ax2.set_xlabel("Timestamp")
        ax2.set_ylabel("Queue Length")
        ax2.set_title("Queue Length over Time")
        ax2.legend(loc='best')

    # Align both x-axes to the full time span covered by any series.
    all_timestamps = []
    for flow_data in flows.values():
        all_timestamps.extend(flow_data["timestamp"])
    for qlen in qlen_data.values():
        all_timestamps.extend(qlen["timestamp"])

    if all_timestamps:
        min_time = min(all_timestamps)
        max_time = max(all_timestamps)
        ax1.set_xlim(min_time, max_time)
        ax2.set_xlim(min_time, max_time)

    plt.tight_layout()

    output_filename = filename + ".png"
    plt.savefig(output_filename, bbox_inches='tight', dpi=600)
    plt.close()
    print(f"Plot saved as {output_filename}")


def main():
    """Parse CLI options, load queue data, and plot rate files in parallel.

    Options:
        -c/--cc       congestion-control algorithm name (used in file names)
        -t/--traffic  which rate files to plot: any of the letters "s"
                      (send), "r" (recv), "c" (cc); default "src" = all
        -q/--queue    queue mode passed to processing_queue_data (0 = skip)
        -s/--start_time, -e/--end_time  window in seconds (-1 = unbounded)
        -l/--loc      location tag (parsed but currently unused)
        -i/--exp_id   experiment id (used in file names)
    """
    parser = OptionParser()
    parser.add_option("-c", "--cc", dest="cc", help="congestion algorithm")
    parser.add_option("-t", "--traffic", dest="traffic", help="traffic pattern", default="src")
    parser.add_option("-q", "--queue", dest="queue", help="queue type", default=2)
    parser.add_option("-s", "--start_time", dest="st", help="start time", default=-1)
    parser.add_option("-e", "--end_time", dest="ed", help="end time", default=-1)
    parser.add_option("-l", "--loc", dest="loc", help="location", default="dc")
    parser.add_option("-i", "--exp_id", dest="exp_id", help="experiment id", default="0")
    options, _ = parser.parse_args()
    cc = options.cc
    traffic = options.traffic
    queue = int(options.queue)
    st = float(options.st)
    ed = float(options.ed)
    loc = options.loc  # NOTE(review): never used below -- keep for CLI compatibility
    exp_id = options.exp_id

    # CLI times are in seconds; the log files carry nanosecond timestamps.
    if st != -1:
        st = st * 1000000000
    if ed != -1:
        ed = ed * 1000000000

    # Switches/devices of interest when the queue mode is selective.
    switch_id = [24, 25, 26]
    dev_id = [2, 13]

    current_dir = os.path.dirname(os.path.abspath(__file__))
    qlen_filename = f'{current_dir}/../output/qlen_{cc}_{exp_id}.txt'

    if queue != 0:
        processing_queue_data(qlen_filename, switch_id, dev_id, queue, st, ed)

    # One plotting task per requested rate file.
    tasks = []
    if "s" in traffic:
        tasks.append((f'{current_dir}/../output/send_rate_{cc}_{exp_id}.txt', "send", st, ed, cc))
    if "r" in traffic:
        tasks.append((f'{current_dir}/../output/recv_rate_{cc}_{exp_id}.txt', "recv", st, ed, cc))
    if "c" in traffic:
        tasks.append((f'{current_dir}/../output/rate_{cc}_{exp_id}.txt', "cc", st, ed, cc))

    # mp.Pool(processes=0) raises ValueError, so skip the pool entirely
    # when the traffic pattern selected nothing to plot.
    if tasks:
        with mp.Pool(processes=len(tasks)) as pool:
            pool.starmap(plot_rate, tasks)


if __name__ == "__main__":
    main()
