import argparse
import csv
import math
import os
from time import time

import cv2
import cv2 as cv
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
from torch import nn, optim
from torchvision import transforms as transforms
from sklearn.preprocessing import MinMaxScaler

from models import *

import VideoProcess as videoPro
from Arguments import get_args

transform1 = transforms.Compose([
    transforms.ToTensor(),  # normalize: range [0, 255] -> [0.0, 1.0]
])
transform2 = transforms.Compose([
    transforms.ToTensor(),  # [0, 255] -> [0.0, 1.0]; the Normalize below then maps to [-1.0, 1.0]
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])


# Compute the user's viewport rectangle around the gaze point
def CalculateUserView(x, y, W_Frame, H_Frame, W_Tile, H_Tile):
    """
    Build a viewport rectangle centered on the gaze point (x, y) and
    clamp it to the frame boundaries.

    The rectangle extends half a tile in each direction, so the full
    viewport is one tile wide and one tile high.

    :param x, y: gaze point in pixels
    :param W_Frame, H_Frame: frame width/height in pixels
    :param W_Tile, H_Tile: tile width/height in pixels
    :return: (left, top, right, bottom) — top-left and bottom-right
             corners of the clamped viewport rectangle
    """
    half_w = int(W_Tile * 0.5)  # note: must be an integer (pixel coordinates)
    half_h = int(H_Tile * 0.5)

    left = max(x - half_w, 0)
    top = max(y - half_h, 0)
    right = min(x + half_w, int(W_Frame))
    bottom = min(y + half_h, int(H_Frame))

    return left, top, right, bottom


# Check the prediction result against the user's actual viewport
def CheckPredictResult(TileForCheck, UWL, UHL, UWH, UHH, W_Tile, H_Tile):
    """
    Check whether the four corner tiles of the user's viewport rectangle
    were selected by the prediction.

    (UWL, UHL) is the viewport's top-left corner  --> tile (iL, jL)
    (UWH, UHH) is the viewport's bottom-right corner --> tile (iH, jH)
    The four corner tiles form:
        (iL, jL) ------ (iH, jL)
            |                |
            |                |
            |                |
        (iL, jH) ------ (iH, jH)

    :param TileForCheck: flat row-major tile selection list (1 = selected)
    :param UWL, UHL, UWH, UHH: viewport rectangle in pixels
    :param W_Tile, H_Tile: tile width/height in pixels
    :return:
        allTileTrue (acc for each frame): 1 if all four corner tiles were
            predicted, else 0
        tileTrueNum (countMatched): how many corner tiles (0~4) out of the
            args.tile_x * args.tile_y tiles were predicted
     """
    iL = int(math.floor(UWL / W_Tile))
    jL = int(math.floor(UHL / H_Tile))
    iH = int(math.floor(UWH / W_Tile))
    jH = int(math.floor(UHH / H_Tile))
    allTileTrue = 0
    A, B, C, D = 0, 0, 0, 0
    # clamp: a viewport edge lying exactly on the frame border would
    # otherwise index one past the last tile row/column
    iH = min(iH, args.tile_x - 1)
    jH = min(jH, args.tile_y - 1)
    try:
        # Row-major flat index is row * args.tile_x + col for every corner.
        # (Bug fix: C and D previously used args.tile_y as the stride,
        # which only happened to work because the grid is square.)
        A = TileForCheck[jL * args.tile_x + iL]
        B = TileForCheck[jL * args.tile_x + iH]
        C = TileForCheck[jH * args.tile_x + iL]
        D = TileForCheck[jH * args.tile_x + iH]
    except IndexError:
        print("CheckList out of index")
        exit()
    tileTrueNum = A + B + C + D
    if A * B * C * D == 1:
        allTileTrue = 1
    return allTileTrue, tileTrueNum


def CheckPredictResFeedback(TileForCheck, TileByFeedback):
    """
    Merge the feedback tiles into the predicted tile selection, in place.

    Any tile marked in TileByFeedback but not yet selected in
    TileForCheck is switched on.

    :param TileForCheck: tile status list — 1 selected, 0 not selected (mutated)
    :param TileByFeedback: feedback tile status list
    :return: number of tiles newly enabled by the feedback
    """
    modified = 0
    for idx, selected in enumerate(TileForCheck):
        if selected == 0 and TileByFeedback[idx] == 1:
            TileForCheck[idx] = 1
            modified += 1
    return modified


# Feedback update, somewhat analogous to an LSTM feedback loop
def UpdateTiletFeedback(TileByFeedback, UWL, UHL, UWH, UHH, W_Tile, H_Tile):
    """
    Reset TileByFeedback and mark (in place) every tile covered by the
    user's viewport rectangle.

        (UWL, UHL) viewport top-left corner     --> tile (iL, jL)
        (UWH, UHH) viewport bottom-right corner --> tile (iH, jH)
        Covered tile grid:
            (iL, jL) ------ (iH, jL)
                |                |
                |                |
                |                |
            (iL, jH) ------ (iH, jH)

    :param TileByFeedback: flat row-major tile status list (mutated)
    :param UWL, UHL, UWH, UHH: viewport rectangle in pixels
    :param W_Tile, H_Tile: tile width/height in pixels
    """
    for i in range(len(TileByFeedback)):
        TileByFeedback[i] = 0
    iL = int(math.floor(UWL / W_Tile))
    jL = int(math.floor(UHL / H_Tile))
    iH = int(math.floor(UWH / W_Tile))
    jH = int(math.floor(UHH / H_Tile))

    # clamp to the last tile row/column (viewport edge may sit exactly
    # on the frame border)
    iH = min(iH, args.tile_x - 1)
    jH = min(jH, args.tile_y - 1)
    for i in range(iL, iH + 1):
        for j in range(jL, jH + 1):
            # Row-major flat index is row * args.tile_x + col.
            # (Bug fix: the stride was args.tile_y, which only worked
            # because the grid happens to be square.)
            TileByFeedback[j * args.tile_x + i] = 1


# Convert a list of OpenCV images into one stacked Tensor
def ImageofCVToTensor(imgdata):
    """
    Apply transform1 (ToTensor: [0, 255] -> [0.0, 1.0]) to every image
    and stack the results along a new leading batch dimension.

    :param imgdata: non-empty list of HxWxC OpenCV (numpy) images
    :return: Tensor of shape (len(imgdata), C, H, W)

    The previous implementation stacked images two at a time and
    repeatedly torch.cat-ed the growing tensor — quadratic copying —
    and it raised IndexError for an odd number of images. A single
    torch.stack is linear, produces the identical result for even
    lengths, and also accepts any non-empty list.
    """
    return torch.stack([transform1(img) for img in imgdata], 0)


class Solver(object):
    def __init__(self, config):
        self.model = None
        self.lr = config.lr
        self.lrLSTM = config.lrLSTM
        self.criterion = None
        self.optimizer = None
        self.scheduler = None
        self.device = None
        self.cuda = config.cuda

    def load_model(self):
        if self.cuda:
            self.device = torch.device('cuda')
            cudnn.benchmark = True
        else:
            self.device = torch.device('cpu')

        self.model = myNet().to(self.device)
        # self.model = VGG13().to(self.device)
        print("Total number of paramerters in networks is {}  ".format(sum(x.numel() for x in self.model.parameters())))

        """ 
        选择不同的优化函数计算梯度
         """
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        # 动态 learning rate: https://blog.csdn.net/qq_41872630/article/details/86749972
        # self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[75, 150], gamma=0.5)
        """ 
        计算Loss应该根据模型来选择
         """
        self.criterion = nn.CrossEntropyLoss().to(self.device)

    def run(self, videoId, userId, thres_factor, Epochs):
        """ 
        :param videoId: 视频编号
        :param userId: 用户编号
        :param thres_factor: 决定阈值
        :param Epochs
         """
        start = time()
        self.load_model()

        VideoName = FileList[videoId]
        # 视频路径
        videofile = '../videos/' + VideoName + ".mp4"
        tmp1 = VideoName[0]
        tmp2 = int(VideoName[2]) - 1

        cap = cv.VideoCapture(videofile)
        capB = cv.VideoCapture(videofile)

        # 用户观看行为路径
        UserDataCSV = '../vr-dataset-init/Experiment_' + tmp1 + '/' + str(userId) + "/video_" + str(tmp2) + ".csv"

        # 视频信息
        W_Frame = cap.get(3)
        H_Frame = cap.get(4)
        # check the parameters of get frame:  https://blog.csdn.net/qhd1994/article/details/80238707
        print("===============" + VideoName + "============")
        print("Frame width:", W_Frame)
        print("Frame height:", H_Frame)
        FrameRate = int(round(cap.get(5)))                          # 29.9 fps changed to 30
        SubSampleRate = 4                                           # 4 frames per second
        SubSampleStep = int(math.ceil(FrameRate / SubSampleRate))   # ceil(30 / 4) = 8
        TotalFrames = cap.get(7)                                    # 视频总帧数
        TotalSeconds = int(round(TotalFrames / FrameRate))          # 视频时长
        print("FrameRate:", FrameRate)
        print("TotalFrames:", TotalFrames)
        print("TotalSecond (totalframes / framerate):", TotalSeconds)

        LocationPerFrame, all_dataPF = videoPro.userLocal_One(FrameRate, UserDataCSV, TotalSeconds, H_Frame, W_Frame)
        if not LocationPerFrame:
            print("xxxxxxxxxxxxxxxTimeStamp File Error...xxxxxxxxxxxxxxxxxxx")
            return
        print("Total frame from user data ==> len(LocationPerFrame)", len(LocationPerFrame))

        W_Tile, H_Tile = W_Frame / args.tile_x, H_Frame / args.tile_y  # W_Tile H_Tile: 块的宽度和高度
        bufInSecond = 2                     # bufInSecond: buffer长度bufLen的基数
        bufLen = FrameRate * bufInSecond    # bufLen: 30 * 2 = 60

        # record log
        log_path = args.log_path + f'user_{userId}'
        if not os.path.exists(log_path):
            os.mkdir(log_path)

        LossLog = log_path + f"/{fileNameList[videoId]}_loss_{userId}.csv"
        LossObj = open(LossLog, 'w', newline='')

        MetricsLog = log_path + f"/{fileNameList[videoId]}_log_{userId}.csv"
        MetricsObj = open(MetricsLog, 'w', newline='')

        writerLoss = csv.writer(LossObj)
        writerMetrics = csv.writer(MetricsObj)

        # 先写入头信息
        rows = [['VideoName', 'UserIndex', 'ThresFactor', 'Epochs', 'TotalTile/Buffer', 'SampleFrames/Buffer'],
                [VideoName, userId, thres_factor, Epochs, args.tile_x * args.tile_y * bufLen, SubSampleStep]]
        writerLoss.writerows(rows)
        writerMetrics.writerows(rows)
        writerLoss.writerow(['AccInBuffer', 'RunningTime', 'TrainedEpochs', 'LossList'])
        writerMetrics.writerow(['PredictTile/Buffer', 'PredictTile/Frame',
                                'MeanAccInBuffer', 'MeanRecallInBuffer', 'MeanPrecisionInBuffer',
                                'MeanLowBand/Buffer', 'MeanHighBand/Buffer'])

        Video = VideoName + '_'+ 'Tk-' + str(thres_factor) + '_epoch-' + str(Epochs) + '.mp4'
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter('normal/' + Video, fourcc, FrameRate, (int(W_Frame), int(H_Frame)))

        # 视频运行的主循环
        countMain = 0                                   # 程序主计数器
        ColorPrediction = (255, 255, 0)                   # 预测的BGR: 蓝色
        addColorPrediction = (255, 255, 0)              # 补充的BGR: 青色
        ColorView = (0, 0, 255)                         # 实际的BGR: 红色
        FirstLoss = 0                                   # loss
        TotalTile = args.tile_x * args.tile_y           # 25 tiles
        TileByFeedback = [0 for _ in range(TotalTile)]  # tile status by user feedback
        baseId = -1                                     # 定位LocationFrame的基数
        train_correct = 0
        train_bandTile = 0
        AverageAccList, AverageRecallList, AveragePreciseList = [], [], []
        AverageLowBandList, AverageHighBandList = [], []
        while True:
            buffer_frame_list = []      # 表示视频帧
            buffer_viewport_list = []   # 视频帧对应的用户观看区域

            """ # 获取一个buffer内图片和用户数据
                添加下采样，1秒4帧就可以 bufLen = FrameRate * bufInSecond = 60
                SubSampleRate = 4
                SubSampleStep = math.ceil(FrameRate / SubSampleRate)
                bufLen / SubSampleStep = bufInSecond * SubSampleRate = 8
             """
            startT1 = time()
            ret = None
            if countMain + bufLen < TotalFrames:  # 每次处理一个buffer。 这里确保后面有一个buffer的frames
                for i in range(bufLen):
                    ret, frame = cap.read()
                    if i % SubSampleStep == 0:  # 这里目标暂时是2秒的buffer中获取8帧。
                        buffer_frame_list.append(frame)  # 添加图片
                        buffer_viewport_list.append(LocationPerFrame[countMain])  # 添加对应的用户数据
                    countMain += 1
                baseId += 1
            else:
                break
            if not ret:
                print("countMain", countMain, "Frame out for " + VideoName + " or Less then 2s")
                break

            """ 
                1. 获取一个buffer内容后，每帧切5*5=25 个小块。组成200个小图片作为一个batch
                   然后放入CNN中训练
                   5*5*8: processed_data
                   len(buffer_viewport_list) = len(buffer_frame_list) = 8
                2. 此函数是将一个buffer内，取所有帧内特定一个块组成一组，有多少帧就有多少个小图片，块有多大，小图就有多大。
                   对于p_f中每一帧都提取其中用户观看的tile存在f中
                   一共 8 frames 提取其中的tiles，这里按照tile的顺序 1、2、3、4、……、25
                   每个指定序号的的tile有8个，一共有 200 个tiles
             """
            u, f, v = videoPro.processed_data_Tiles(W_Frame, H_Frame, [args.tile_x, args.tile_y], 1,
                                                    buffer_viewport_list, buffer_frame_list)
            # print(f'baseId=f{baseId} \n u: {len(u)}, {u}')
            # 为了方便numpy数组合并，第一个块单独完成，剩余24个块由for循环完成。然后每执行一次合并一次数组
            for index in range(1, args.tile_x * args.tile_y):
                au, af, av = videoPro.processed_data_Tiles(W_Frame, H_Frame, [args.tile_x, args.tile_y], index + 1,
                                                           buffer_viewport_list, buffer_frame_list)
                # print(f'u: {len(au)}, {au}')
                u.extend(au)
                f.extend(af)
                v.extend(av)

            """
                u 表示用户视野是否在tile内 只有 0，1 和 buffer_frame_list 对应 应该可以作为label
                f 存放每个tile 和p_u对应
                v 存放在视野范围内的tile 对应于 buffer_viewport_list 中1的部分
            """
            TenFram = ImageofCVToTensor(f).to(self.device)  # torch.Size([200, 3, 32, 32])
            self.optimizer.zero_grad()  # 把梯度置零，也就是把loss关于weight的导数变成0

            # CNN 预测FoV
            output = self.model(TenFram)
            # print(f'baseId={baseId}, FirstLoss={FirstLoss}')
            endT1 = time()
            totalT1 = endT1 - startT1

            # New updated result output
            score_list = []
            # 这里的 8 = SubSampleRate * bufInSecond = 4 * 2
            for i in range(bufLen // SubSampleStep + 1):
                for j in range(TotalTile):
                    score_list.append(output[j * 8 + i].data)

            # display the prediction result in one buffer

            # 处理第1个frame
            tile_score = []
            tile_first_score = []
            # Uframe = v[0]
            for i in range(TotalTile):
                tile_score.append(score_list[i])
                tile_first_score.append(score_list[i][0])

            sort_first_score = tile_first_score.copy()
            sort_first_score.sort()
            Thres = sort_first_score[int(TotalTile / 2 + (TotalTile / 8) * thres_factor)]

            step = 0
            UWL, UHL, UWH, UHH = 0, 0, 0, 0

            bandTileInBuffer = 0
            trainCorrectInBuffer = 0
            accList = []
            recallList = []
            precisionList = []
            lowBandTile = []
            highBandTile = []
            for i in range(bufLen):
                ret, frameB = capB.read()
                # 4920 / 60 = 82
                if i + baseId * bufLen >= len(LocationPerFrame):
                    print("ERROR...for LocationPerFrame", i, baseId, VideoName)
                    continue
                x = int(LocationPerFrame[i + baseId * bufLen][0])
                y = int(LocationPerFrame[i + baseId * bufLen][1])
                # 处理剩下7个frame
                if i % SubSampleStep == 0 and i != 0:
                    step += TotalTile
                    tile_score = []
                    tile_first_score = []
                    for k in range(TotalTile):
                        if k + step >= len(score_list):
                            print("ERROR...for tile_score")
                            exit()
                        tile_score.append(score_list[k + step])
                        tile_first_score.append(score_list[k + step][0])
                    sort_first_score = tile_first_score.copy()
                    sort_first_score.sort()
                    Thres = sort_first_score[int(TotalTile / 2 + (TotalTile / 8) * thres_factor)]  # best 4

                # cv.circle(frameB, (x, y), 100, (0, 0, 255), 8)
                UWL, UHL, UWH, UHH = CalculateUserView(x, y, W_Frame, H_Frame, W_Tile, H_Tile)
                # 当前观看位置，OpenCV画矩形函数：https://blog.csdn.net/sinat_41104353/article/details/85171185
                cv.rectangle(frameB, (UWL, UHL), (UWH, UHH), ColorView, 3)

                # if i % SubSampleStep == 0 and baseId >= 1:
                #     if self.LSTMflag and pre_Y is not None:
                #         x1, y1 = pre_Y[i // SubSampleStep][0], pre_Y[i // SubSampleStep][1]
                #         UWL1, UHL1 ,UWH1, UHH1 = CalculateUserView(x1, y1, W_Frame, H_Frame, W_Tile, H_Tile)
                #         UpdateTiletFeedback(TileByFeedback, UWL1, UHL1, UWH1, UHH1, W_Tile, H_Tile)
                #     else:
                #         UpdateTiletFeedback(TileByFeedback, UWL, UHL, UWH, UHH, W_Tile, H_Tile)

                PredictedTile = 0
                TileForCheck = []
                for k in range(TotalTile):
                    if tile_first_score[k] < Thres:
                        TileForCheck.append(1)
                        PredictedTile += 1
                        row = math.floor(k / args.tile_x)
                        col = k % args.tile_y
                        WL = int(col * W_Tile)
                        HL = int(row * H_Tile)
                        WH = int((col + 1) * W_Tile)
                        HH = int((row + 1) * H_Tile)
                        if WL < 0:
                            WL = 0
                        if HL < 0:
                            HL = 0
                        if WH > W_Frame:
                            WH = W_Frame
                        if HH > H_Frame:
                            HH = H_Frame
                        WL, HL, WH, HH = int(WL), int(HL), int(WH), int(HH)
                        # 画出预测的tile
                        cv.rectangle(frameB, (WL, HL), (WH, HH), ColorPrediction, 3)
                    else:
                        TileForCheck.append(0)

                for k in range(TotalTile):
                    if TileByFeedback[k] == 1:
                        row = math.floor(k / args.tile_x)
                        col = k % args.tile_y
                        WL = int(col * W_Tile)
                        HL = int(row * H_Tile)
                        WH = int((col + 1) * W_Tile)
                        HH = int((row + 1) * H_Tile)
                        if WL < 0:
                            WL = 0
                        if HL < 0:
                            HL = 0
                        if WH > W_Frame:
                            WH = W_Frame
                        if HH > H_Frame:
                            HH = H_Frame
                        WL, HL, WH, HH = int(WL), int(HL), int(WH), int(HH)
                        # 通过反馈弥补预测的tile
                        cv.rectangle(frameB, (WL, HL), (WH, HH), addColorPrediction, 3)
                """ 
                    TileForCheck 表示那个tile被选择，1 -- 被选择，0 -- 未被选择
                    TileByFeedback 初始全是0(其实可以提前更新...但是不是很符合直播场景)，之后会被更新
                 """
                _, tileTrueNum = CheckPredictResult(TileForCheck, UWL, UHL, UWH, UHH, W_Tile, H_Tile)
                addTileNum = CheckPredictResFeedback(TileForCheck, TileByFeedback)
                totalPreTile = addTileNum + PredictedTile

                # if PredictedTile >= TotalTile * 0.48:   # 缩小标准
                #     thres_factor = -3
                #     if i == 0:
                #         print(f"baseId={baseId}, control thres_factor={thres_factor}")
                # elif PredictedTile >= TotalTile * 0.36:
                #     thres_factor = -2
                #     if i == 0:
                #         print(f"baseId={baseId}, control thres_factor={thres_factor}")
                # else:
                #     thres_factor = -1

                allTileTrue, tileTrueNum = CheckPredictResult(TileForCheck, UWL, UHL, UWH, UHH, W_Tile, H_Tile)
                # metrics in one buffer
                bandTileInBuffer += totalPreTile
                trainCorrectInBuffer += allTileTrue
                # metrics in one video
                train_bandTile += totalPreTile  # 统计预测的tile数量
                train_correct += allTileTrue    # 统计采样帧是否预测准确

                # 动态调整 learning rate
                notMatch = 4 - tileTrueNum
                if notMatch == 1:
                    self.lr = 0.003
                if notMatch == 2:
                    self.lr = 0.006
                if notMatch == 3:
                    self.lr = 0.008
                if notMatch == 4:
                    self.lr = 0.01

                eps = 1e-3
                if i % SubSampleStep == 0:
                    accList.append(round((TotalTile-totalPreTile+tileTrueNum)/TotalTile, 4))
                    recallList.append(round(tileTrueNum/4, 4))
                    precisionList.append(round((tileTrueNum+eps)/(totalPreTile+eps), 4))
                    highTile = totalPreTile - tileTrueNum + 4
                    lowTile = TotalTile - highTile
                    lowBandTile.append(highTile * highRate + lowTile * lowRate)
                    highBandTile.append(TotalTile * highRate)

                cv.imshow('Frames', frameB)
                out.write(frameB)

                if cv.waitKey(1) & 0xFF == ord('q'):
                    break
            """ 
            CNN Live training
             """
            startT2 = time()
            user = torch.from_numpy(np.array(u))
            target = user.long().to(self.device)  # torch.Size([200])
            epoch, loss = 1, 1
            lossList = []
            for epoch in range(Epochs):
                self.optimizer.zero_grad()  # 把梯度置零，也就是把loss关于weight的导数变成0
                if epoch != 0:
                    output = self.model(TenFram)
                loss = self.criterion(output, target)  # Shape: torch.Size([200, 2]) torch.Size([200])
                lossList.append(loss.item())
                if epoch == 0:  # 原始代码是 1
                    FirstLoss = loss.item()
                if loss < 0.2:
                    break
                if loss < 0.4:
                    self.lr = 0.001
                loss.backward()
                self.optimizer.step()

            FinalLoss = loss.item()

            endT2 = time()
            ToTalTime = endT2 - startT2 + totalT1
            if baseId % SubSampleStep == 0:
                print("baseId:", baseId, "trianedEpochs:", Epochs,
                      "loss: %.4f --> %.4f" % (FirstLoss, FinalLoss), "time:", ToTalTime)

            writerLoss.writerow([round(trainCorrectInBuffer/bufLen, 4), round(ToTalTime, 5), epoch+1]
                                + [round(item, 5) for item in lossList])
            writerMetrics.writerow([bandTileInBuffer, round(bandTileInBuffer/bufLen, 2),
                                    np.mean(accList), np.mean(recallList), np.mean(precisionList),
                                    np.mean(lowBandTile), np.mean(highBandTile)])

            AverageAccList.append(np.mean(accList))
            AverageRecallList.append(np.mean(recallList))
            AveragePreciseList.append(np.mean(precisionList))
            AverageLowBandList.append(np.mean(lowBandTile))
            AverageHighBandList.append(np.mean(highBandTile))

            UpdateTiletFeedback(TileByFeedback, UWL, UHL, UWH, UHH, W_Tile, H_Tile)

        acc = round(train_correct / TotalFrames, 5)
        end = time()
        print(f'Test video={fileNameList[videoId]} for user={userId} finished, time={round(end - start, 5)}')
        writerLoss.writerows([['Accuracy', acc], ['SumTime', round(end - start, 5)]])
        writerMetrics.writerows([['Avarage Tile', int(train_bandTile / TotalFrames)],
                                 ['SumTime', round(end - start, 5)],
                                 ['PredictAccuracy', acc],
                                 ['AverageAccuracy', np.mean(AverageAccList)],
                                 ['AverageRecall', np.mean(AverageRecallList)],
                                 ['AveragePrecise', np.mean(AveragePreciseList)],
                                 ['AverageLowBand', np.mean(AverageLowBandList)],
                                 ['AverageHighBand', np.mean(AverageHighBandList)]])

        cap.release()
        capB.release()
        cv.destroyAllWindows()


if __name__ == '__main__':  # python3 LiveDeep.py --grid 5 --index 1
    args = get_args()
    """
        1. EpomaxL = [8,10,12,14] for Epochs in EpomaxL
        2. thres_factor_list = [0, 1, -1, -2] 为了设置阈值的取定 
           for thresFactor in thres_factor_list: [0 --> 1/2] [1 --> 5/8] [-1 --> 3/8] [-2 --> 1/4]
    """

    # video Filename list 共17个
    fileNameList = ['Conan1', 'Skiing', 'Alien', 'Conan2', 'Surfing', 'War', 'Cooking', 'Football', 'Rhinos']

    FileList = ["1-1-Conan Gore Fly", "1-2-Front", "1-3-360 Google Spotlight Stories_ HELP", "1-4-Conan Weird Al",
                "1-5-TahitiSurf", "1-6-Falluja", "1-7-Cooking Battle", "1-8-Football", "1-9-Rhinos",
                "2-1-Korean", "2-2-VoiceToy", "2-3-RioVR", "2-4-FemaleBasketball", "2-5-Fighting", "2-6-Anitta",
                "2-7-TFBoy", "2-8-reloaded"]

    highBitrate, lowBitrate = 91250, 39068
    highRate, lowRate = highBitrate / lowBitrate, 1
    bandwidth_dict = {"Conan1": 58193.0 / 18286.0, "Skiing": 149997.0 / 38100.0, "Alien": 71849.0 / 20417.0,
                      "Conan2": 88758.32 / 24370.03, "Surfing": 209369.63 / 50935.38, "War": 131147.0 / 33806.0,
                      "Cooking": 80397.0 / 25740.0, "Football": 2.4216, "Rhinos": 57806.0 / 16597.0}

    solver = Solver(args)
    for i in range(0, 1):  # 17 个视频编号
        start_time = time()
        if i == 7:
            continue
        for j in range(1, 2):  # 48 个用户编号
            highRate, lowRate = bandwidth_dict[fileNameList[i]], 1
            solver.run(i, j, args.thresFactor, args.epochCNN)
        end_time = time()
        print(f'Test video={fileNameList[i]} for 48 users finished, time={round(end_time - start_time, 4)}s\n')
