#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/12/24 17:11
# @Author  : zzp
# @File    : main
# @Software: PyCharm
import os
import sys
import time
from collections import deque
from datetime import datetime

import torch
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed

from dataloaders import build_dataset
from models import build_model
from opts import arg_parser
from utils.CacheEntry import LRUCache
from utils.build_cfg import setup_cfg
from utils.helper import AverageMeter, mAP
from utils.validations import validate
import logging

formatted_time = "%Y-%m-%d-%H-%M-%S"


# Compute semantic features for router nodes and client (content) nodes.
def calculate_feature(model, net, routers, clients):
    """Populate node features over the network topology, outside-in.

    Client features come from the model's text encoder applied to the
    client's content id.  A router's feature is the mean of its neighbours'
    features, so routers are resolved iteratively: a router can only be
    computed once every neighbouring router already has its feature.

    Args:
        model: wrapper exposing ``compute_text_feature(ids)``.
        net: adjacency dict mapping node name -> list of neighbour names;
            router names contain 'R', everything else is a client.
        routers: dict of router objects exposing ``is_feature``,
            ``set_feature`` and ``get_feature``.
        clients: dict of client objects exposing ``get_content_id``,
            ``set_feature`` and ``get_feature``.

    Raises:
        RuntimeError: if a full pass over the routers resolves nothing
            (e.g. a cycle of routers with no computed neighbour) — the
            previous implementation would spin forever in that case.
    """
    # Client features: encode each client's content id with the text encoder.
    for client in clients.values():
        client.set_feature(model.compute_text_feature([client.get_content_id()]))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    remaining = len(routers)
    while remaining > 0:
        progressed = False  # did this pass resolve at least one router?
        for name, router in routers.items():
            if router.is_feature:  # feature already computed
                continue
            neighbors = net[name]
            # Assumes node features are [2, 512] tensors — TODO confirm
            # against the model's text-encoder output shape.
            feature = torch.zeros(2, 512).to(device)
            ready = True  # are all neighbouring routers already resolved?
            for neighbor in neighbors:
                if 'R' in neighbor and not routers[neighbor].is_feature:
                    ready = False
                    break
                feature += routers[neighbor].get_feature() if 'R' in neighbor else clients[neighbor].get_feature()
            # All neighbour features are known: this router's feature is
            # the mean of its neighbours' features.
            if ready:
                router.set_feature(feature / len(neighbors))
                router.is_feature = True
                remaining -= 1
                progressed = True
        if remaining > 0 and not progressed:
            raise RuntimeError(
                'calculate_feature: unresolved routers remain but no progress '
                'is possible (cyclic router topology?)')


def bfs(model, dataset, image_feature, queue, thre1, thre2):
    """Distribute content to client nodes by breadth-first traversal.

    Starting from the nodes initially in ``queue``, routers whose semantic
    similarity to the content exceeds ``thre2`` forward it to their
    neighbours; clients whose similarity exceeds ``thre1`` receive it.

    Args:
        model: exposes ``similarity(image_feature, node_feature)``.
        dataset: exposes ``clients``, ``routers``, ``net`` (adjacency dict)
            and ``client2idx``.
        image_feature: content feature tensor; batch size 1 is assumed by
            the ``outputs[0, ...]`` accounting — TODO confirm.
        queue: ``collections.deque`` seeded with the injection nodes.
        thre1: client delivery threshold.
        thre2: router forwarding threshold.

    Returns:
        Tuple ``(forwarding_interfaces, outputs, isucc, ihop, avg_sim)``:
        delivered client names, per-client similarity tensor, number of
        client nodes reached, hop count and mean similarity of deliveries
        (``ihop``/``avg_sim`` are 0.0 when nothing was delivered).
    """
    # Hoisted out of the loop: the original rebuilt these per visited node.
    softmax = torch.nn.Softmax(dim=1)
    sigmoid = torch.nn.Sigmoid()

    def _score(node_feature):
        # Similarity between the content and one node, mapped into [0, 1].
        # A 3-d similarity output is treated as class logits (take the
        # positive-class softmax); otherwise a raw score fed to sigmoid.
        output = model.similarity(image_feature, node_feature)
        if output.dim() == 3:
            return softmax(output).cpu()[:, 1]
        return sigmoid(output).cpu()

    forwarding_interfaces = list()
    visited = set()
    # Similarity of the content against every possible delivery node.
    outputs = torch.zeros(image_feature.shape[0], len(dataset.clients))
    ihop = 0.0     # hop count
    avg_sim = 0.0  # accumulated similarity of deliveries
    isucc = 0.0    # number of client nodes reached
    while queue:
        for _ in range(len(queue)):
            node = queue.popleft()
            if node in visited:
                continue
            visited.add(node)
            if 'R' not in node:  # terminal (client) node
                isucc += 1
                sim = _score(dataset.clients[node].get_feature())
                outputs[0, dataset.client2idx[node]] = sim
                if sim > thre1:
                    forwarding_interfaces.append(node)
                    ihop += 1
                    avg_sim += sim.item()
            else:  # router node
                sim = _score(dataset.routers[node].get_feature())
                if sim > thre2:
                    queue.extend(dataset.net[node])
                    ihop += 1
    if len(forwarding_interfaces) == 0:
        return forwarding_interfaces, outputs, isucc, 0.0, 0.0
    return forwarding_interfaces, outputs, isucc, ihop, avg_sim / len(forwarding_interfaces)


def _forward_packet(model, dataset, image_feature, target, thre1, thre2):
    """Distribute one content packet from the fixed injection routers.

    Runs the BFS distribution starting from routers 'R2' and 'R4' and counts
    how many reached clients did not actually want the content (invalid
    transmissions, judged against ``target``).

    Returns:
        Tuple ``(forwarding_interfaces, output, isucc, ihop, sim, invalid)``.
    """
    queue = deque(['R2', 'R4'])  # injection points of the evaluated topology
    forwarding_interfaces, output, isucc, ihop, sim = bfs(
        model, dataset, image_feature, queue, thre1, thre2)
    invalid = 0.0
    for interface in forwarding_interfaces:
        idx = dataset.clients[interface].content_id
        if target[0, idx] == 0:  # reached client did not request this content
            invalid += 1
    return forwarding_interfaces, output, isucc, ihop, sim, invalid


def validate(model, data_loader, dataset, thre1, thre2):
    """Evaluate semantic content-distribution metrics on ``data_loader``.

    NOTE(review): this definition shadows ``validate`` imported from
    ``utils.validations`` at the top of the file — presumably intentional,
    but worth confirming.

    Relies on module-level globals set up in ``main``: ``cfg``
    (``cfg.CDN.NO_CACHE``), ``args`` (``args.print_freq``) and ``logger``.

    Args:
        model: exposes ``compute_image_feature`` and ``similarity``.
        data_loader: yields ``(images, target)`` batches; batch size 1 is
            assumed by the per-packet accounting — TODO confirm.
        dataset: exposes ``clients``/``routers``/``net``/``client2idx``.
        thre1: similarity threshold for delivering to a client.
        thre2: similarity threshold for forwarding through a router.

    Returns:
        Tuple ``(TSR, AHC, AFC, SRM, ITR)`` computed over the whole loader.
        (The original returned whatever the last logging iteration happened
        to compute, and raised ``NameError`` on an empty loader.)
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def _ratios(processed, succ, receive, hop, forwarding, sim_total, invalid):
        # Transmission-success rate, avg hop count, avg forwarding count,
        # semantic-relevance mean, invalid-transmission rate.  Each ratio
        # is 0 when its denominator is 0.
        TSR = (succ / processed) if processed else 0
        AHC = (hop / receive) if receive else 0
        AFC = (forwarding / receive) if receive else 0
        SRM = (sim_total / receive) if receive else 0
        ITR = (invalid / receive) if receive else 0
        return TSR, AHC, AFC, SRM, ITR

    with torch.no_grad():
        cache = LRUCache(100)
        count_succ = 0.0        # successfully forwarded packets
        count_receive = 0.0     # successfully received packets
        count_hop = 0.0         # total hop count
        count_forwarding = 0.0  # total forwarding operations
        all_sim = 0.0           # accumulated semantic relevance
        count_invalid = 0.0     # invalid (unwanted) transmissions
        count_hit = 0.0         # cache hits
        processed = 0           # packets processed so far (fixes the old
                                # off-by-one division by ``i``)
        for i, (images, target) in enumerate(data_loader):
            images = images.to(device)
            target = target.max(dim=1)[0]
            # Content feature of the current packet, e.g. [1, 512, 197].
            image_feature = model.compute_image_feature(images)
            cacheEntry = cache.get(image_feature)
            hit = (not cfg.CDN.NO_CACHE) and cacheEntry is not None

            # The original duplicated this whole body across the NO_CACHE /
            # cache-miss / cache-hit branches; only the forwarding cost and
            # the cache bookkeeping actually differ.
            forwarding_interfaces, output, isucc, ihop, sim, invalid = \
                _forward_packet(model, dataset, image_feature, target, thre1, thre2)
            count_invalid += invalid
            count_receive += len(forwarding_interfaces)
            count_succ += isucc
            count_hop += ihop
            all_sim += sim
            if hit:
                count_hit += 1
                # A cache hit costs at most a single forwarding operation.
                count_forwarding += 0 if ihop * len(forwarding_interfaces) == 0 else 1
            else:
                count_forwarding += ihop * len(forwarding_interfaces)
                if not cfg.CDN.NO_CACHE:  # cache miss: remember this packet
                    cache.put(i, image_feature, forwarding_interfaces, output)

            processed = i + 1
            if i % args.print_freq == 0:
                TSR, AHC, AFC, SRM, ITR = _ratios(
                    processed, count_succ, count_receive, count_hop,
                    count_forwarding, all_sim, count_invalid)
                logger.info(
                    'Test: [{}/{}]\t'
                    'TSR: {:.3f}\t'
                    'AHC: {:.3f}\t'
                    'AFC: {:.3f}\t'
                    'SRM: {:.3f}\t'
                    'ITR: {:.3f}'.format(
                        i, len(data_loader),
                        TSR, AHC, AFC, SRM, ITR
                    )
                )
                if not cfg.CDN.NO_CACHE:  # also report the cache hit rate
                    CHR = count_hit / processed
                    logger.info('Test: [{}/{}]\t'
                                'CHR: {:.3f}'.format(i, len(data_loader), CHR))
                    torch.cuda.empty_cache()

    # Final metrics over everything processed, not the last log snapshot.
    return _ratios(processed, count_succ, count_receive, count_hop,
                   count_forwarding, all_sim, count_invalid)


# Entry point: build the model, restore weights, evaluate distribution metrics.
def main():
    """Parse arguments, load the pretrained model and run the evaluation."""
    global args
    args = arg_parser().parse_args()
    global cfg
    cfg = setup_cfg(args)

    # Dataset and loader for the configured test split.
    dataset = build_dataset(cfg, cfg.DATASET.TEST_SPLIT)
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=cfg.DATALOADER.TEST.BATCH_SIZE,
        shuffle=cfg.DATALOADER.TEST.SHUFFLE,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=True,
    )

    # Model plus a timestamped per-run log directory.
    model, arch_name = build_model(cfg, args, dataset.classnames)
    run_stamp = datetime.now().strftime(formatted_time)
    config_log(os.path.join(cfg.OUTPUT_DIR, f"{arch_name}-{run_stamp}"))
    model.eval()

    # Guard clause: evaluation without pretrained weights makes no sense.
    if args.pretrained is None or not os.path.exists(args.pretrained):
        raise ValueError('args.pretrained is missing or its path does not exist')
    print(f'... loading pretrained weights from {args.pretrained}')
    checkpoint = torch.load(args.pretrained, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    print('Epoch: %d' % checkpoint['epoch'])

    print(f'Evaluate with threshold {args.thre:.2f}')
    calculate_feature(model, dataset.net, dataset.routers, dataset.clients)
    # Compute and report the final metrics.
    TSR, AHC, AFC, SRM, ITR = validate(model, loader, dataset, 0.78, 0.18)
    logger.info(
        'TSR: {:.3f}\t '
        'AHC: {:.3f}\t '
        'AFC: {:.3f}\t '
        'SRM: {:.3f}\t '
        'ITR: {:.3f}'.format(
            TSR, AHC, AFC, SRM, ITR
        )
    )


def config_log(log_folder):
    """Configure the module-level ``logger`` to write to stdout and a file.

    Creates ``log_folder`` if needed and attaches a stream handler plus a
    ``log.log`` file handler, both at DEBUG level with a shared formatter.

    Args:
        log_folder: directory that will hold ``log.log``.
    """
    global logger
    os.makedirs(log_folder, exist_ok=True)
    log_path = os.path.join(log_folder, 'log.log')

    logger = logging.getLogger("logger")
    logger.setLevel(logging.DEBUG)

    # Fix: repeated calls previously stacked handlers on the shared
    # "logger", emitting every record once per call. Close and drop any
    # handlers from an earlier configuration first.
    for old_handler in logger.handlers[:]:
        logger.removeHandler(old_handler)
        old_handler.close()

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt=formatted_time)

    # Stream handler: mirror all records to stdout.
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    # File handler: persist all records to the run's log file.
    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)


if __name__ == '__main__':
    main()
