import math
import random
import time
import torch.nn as nn
import numpy as np
import torch
import torch.nn.functional as F
from Particle import Particle
from SelfTalk import GNNTalk
from data_loader import get_dataloaders
import os
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2Processor
import time

# from main import trainer

from pso_search_space import SearchSpace
# from args import reset_pso_parameters
# from models import initialModel, aggregator
# from norms import Gnorm
# from utils import load_data, accuracy, normalize_adj
# from utils import print_args


def bpr_loss_WN(user_emb, pos_item_emb):
    """BPR-style alignment loss without negative sampling.

    Scores each row pair by a dot product and penalizes low
    sigmoid-scores: mean(-log(eps + sigmoid(<u, v>))).

    Args:
        user_emb: (N, D) tensor of embeddings.
        pos_item_emb: (N, D) tensor of positive-pair embeddings.

    Returns:
        Scalar tensor, the mean loss over the N pairs.
    """
    scores = (user_emb * pos_item_emb).sum(dim=1)
    # eps (10e-8) keeps the log away from -inf when sigmoid underflows
    per_pair = -torch.log(10e-8 + torch.sigmoid(scores))
    return per_pair.mean()

def trainer(args, train_loader, dev_loader, model, optimizer, criterion, epoch, last_train):
    """Train the model for `epoch` + 1 epochs, validating after each.

    The total loss is a weighted sum of four terms:
        w1 * reconstruction + w2 * velocity
        + (w3 / 10000) * BPR(lip, text) + (w4 / 100000) * CTC
    where w1..w4 come from args (as written by reset_pso_parameters),
    so this function doubles as the PSO fitness evaluation.

    Args:
        args: run configuration (device, paths, loss weights w1..w4,
            gradient_accumulation_steps, max_epoch, ...).
        train_loader: training dataloader yielding
            (audio, vertice, template, file_name).
        dev_loader: validation dataloader with the same item layout.
        model: the GNNTalk model to train.
        optimizer: optimizer over the model's trainable parameters.
        criterion: reconstruction loss (MSE in get_fitness).
        epoch: number of additional epochs to run (loop is epoch + 1).
        last_train: if non-zero, resume from checkpoint
            '{last_train}_model.pth' under args.load_path.

    Returns:
        (model, best_val): the trained model and the best validation
        fitness observed, where fitness = 0.00001 - mean val loss
        (higher is better).
    """
    save_path = os.path.join(args.dataset, args.save_path)
    save_path = save_path + '_' + str(args.feature_dim) + '_' + str(time.strftime("%m_%d_%H_%M", time.localtime()))
    os.makedirs(save_path, exist_ok=True)
    if last_train != 0:
        # Resume: load the checkpoint on CPU first, then move to device.
        model.load_state_dict(torch.load(os.path.join(args.load_path, '{}_model.pth'.format(last_train)),
                                         map_location=torch.device('cpu')))
        model = model.to(args.device)
    writer = SummaryWriter(log_dir="log/{}".format(save_path.split('/')[-1]))
    processor = Wav2Vec2Processor.from_pretrained(r"/mnt/wav2vec2-large-xlsr-53-english")
    tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(r"/mnt/wav2vec2-large-xlsr-53-english")
    iteration = 0

    # PSO bookkeeping
    t_total = time.time()
    # BUGFIX: best_val was initialized to 0, so any run whose fitness
    # (0.00001 - loss) never exceeded 0 returned the constant 0, which
    # made all such PSO particles indistinguishable. Start at -inf so
    # the best observed fitness is always tracked.
    best_val = float('-inf')
    best_lambda_w = [args.w1, args.w2, args.w3, args.w4]
    best_lambda_w_history = []

    for e in range(epoch + 1):
        loss_log = []
        # ---------------- train ----------------
        model.train()
        pbar = tqdm(enumerate(train_loader), total=len(train_loader))
        optimizer.zero_grad()

        for i, (audio, vertice, template, file_name) in pbar:
            iteration += 1
            audio, vertice, template = audio.to(args.device), vertice.to(args.device), template.to(args.device)
            vertice_out, vertice, lip_features, text_hidden_states, logits, text_logits = model(audio, template,
                                                                                                vertice)
            # Vertex reconstruction loss.
            loss1 = criterion(vertice_out, vertice)
            # Velocity loss: frame-to-frame differences should match.
            gt_vel = vertice[:, 1:, :] - vertice[:, :-1, :]
            pred_vel = vertice_out[:, 1:, :] - vertice_out[:, :-1, :]
            loss2 = criterion(pred_vel, gt_vel)
            # BPR-style alignment between lip features and text hidden states.
            loss3 = bpr_loss_WN(lip_features, text_hidden_states)
            # CTC loss: decode the text branch's greedy transcription and
            # use it as the target sequence for the lip-reading logits.
            text_logits = torch.argmax(text_logits, dim=-1)
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
            text_logits = processor.batch_decode(text_logits)
            text_logits = tokenizer(text_logits, return_tensors="pt").input_ids
            text_logits = text_logits.to(args.device)
            # NOTE(review): the single-element length tensors assume
            # batch size 1 -- confirm the dataloader always yields
            # single-sample batches.
            loss4 = nn.functional.ctc_loss(
                log_probs,
                text_logits,
                torch.tensor([log_probs.shape[0]]),
                torch.tensor([text_logits.shape[1]]),
                blank=0,
                reduction="mean",
                zero_infinity=True,
            )
            # Weighted sum; w3/w4 are rescaled to keep their terms small.
            loss = best_lambda_w[0] * loss1 + best_lambda_w[1] * loss2 + best_lambda_w[2] / 10000 * loss3 \
                + best_lambda_w[3] / 100000 * loss4
            loss.backward()
            loss_log.append(loss.item())
            # Step only every gradient_accumulation_steps mini-batches.
            if i % args.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()

            pbar.set_description(
                "(Epoch {}, iteration {}) TRAIN LOSS:{:.7f}, loss1:{:.7f}, loss2:{:.7f}, loss3:{:.7f}, loss4:{:.7f}".format(
                    e, iteration, loss.item(), loss1.item(), loss2.item(), loss3.item(), loss4.item()))
        # The scalars below log the losses of the epoch's *last* batch.
        writer.add_scalar("train/loss1", loss1.item(), e)
        writer.add_scalar("train/loss2", loss2.item(), e)
        writer.add_scalar("train/loss3", loss3.item(), e)
        writer.add_scalar("train/loss4", loss4.item(), e)
        writer.add_scalar("train/loss", loss.item(), e)
        writer.add_scalar("train/lr", optimizer.param_groups[0]['lr'], e)

        # ---------------- validation ----------------
        valid_loss_log = []
        model.eval()
        with torch.no_grad():
            for audio, vertice, template, file_name in dev_loader:
                audio, vertice, template = audio.to(args.device), vertice.to(args.device), template.to(args.device)
                vertice_out, vertice, lip_features, text_hidden_states, logits, text_logits = model(audio, template,
                                                                                                    vertice)
                # Validation uses only the reconstruction loss.
                loss = criterion(vertice_out, vertice)
                valid_loss_log.append(loss.item())
            # Logs the last validation batch's loss only.
            writer.add_scalar("val_loss", loss.item(), e)

        current_loss = np.mean(valid_loss_log)
        val_acc = 0.00001 - current_loss  # PSO fitness: higher is better
        # Periodic checkpointing (every 25 epochs and on the last epoch).
        if (e > 0 and e % 25 == 0) or e == args.max_epoch:
            torch.save(model.state_dict(), os.path.join(save_path, '{}_model.pth'.format(e)))

        if val_acc > best_val:
            best_val = val_acc

        # BUGFIX: typo "epcoh" -> "epoch" in the progress message.
        print("epoch: {}, current loss:{:.7f}".format(e + 1, current_loss))

        best_lambda_w_history.append(best_lambda_w.copy())
        # Persist the PSO loss-weight history; the file is rewritten
        # from scratch each epoch so it is always complete on disk.
        file_path = os.path.join(save_path, "best_lambda_w_history.txt")
        with open(file_path, "w") as file:
            # BUGFIX: the loop variable was named `epoch`, shadowing the
            # function parameter of the same name; renamed.
            for hist_idx, values in enumerate(best_lambda_w_history):
                line = " ".join(map(str, values))
                file.write(f"Epoch {hist_idx + 1}: {line}\n")
    writer.close()
    print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
    print(f'Best val: {best_val:.7f}')
    return model, best_val


def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def reset_pso_parameters(args, loss_selection):
    """Copy the four PSO-selected loss weights onto args.w1 .. args.w4.

    Args:
        args: namespace whose w1..w4 attributes are (re)assigned.
        loss_selection: sequence of at least four weight values in
            order (w1, w2, w3, w4).
    """
    args.w1, args.w2, args.w3, args.w4 = loss_selection[:4]


class ChaoticParticleSwarm:
    """Chaotic particle swarm optimizer over discrete loss-weight choices.

    Each particle's integer position indexes into a SearchSpace; the
    fitness of a position is the best validation score returned by
    trainer() after training a fresh GNNTalk model with the decoded
    loss weights. Chaotic (logistic-map) perturbations of gBest/pBest
    are applied when the swarm's diversity collapses.
    """

    def __init__(self, args, x_min, x_max, max_vel):
        """
        Args:
            args: namespace with PSO hyper-parameters (particle_dim,
                particle_num, iterations, W, c1, c2, seed, no_cuda, ...)
                plus everything trainer()/GNNTalk need.
            x_min: per-dimension lower position bounds.
            x_max: per-dimension upper position bounds.
            max_vel: per-dimension maximum particle speed.
        """
        self.args = args
        self.x_min = x_min
        self.x_max = x_max
        self.max_vel = max_vel  # per-dimension velocity clamp
        self.best_fitness_value = float('-Inf')
        self.best_position = [0 for _ in range(self.args.particle_dim)]  # global best position
        self.fitness_val_list = []  # best fitness recorded after each iteration
        # Global best position per iteration, used by the gBest
        # perturbation distance (Eq. 12).
        # NOTE(review): this table is never written in update() (the
        # assignment there is commented out), so Eq. 12 always sees
        # zeros -- confirm whether that is intended.
        self.genGlobalBest = [[0] * self.args.particle_dim for _ in range(self.args.iterations)]
        self.hybrid_search_space = SearchSpace()

        # BUGFIX: seed the RNGs *before* the random particle
        # initialization below (seeding used to happen afterwards,
        # which defeated reproducibility of the initial swarm).
        self.args.cuda = not self.args.no_cuda and torch.cuda.is_available()
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        if self.args.cuda:
            torch.cuda.manual_seed(self.args.seed)

        # Initialize the swarm and evaluate every particle once.
        self.Particle_list = [Particle(self.x_min, self.x_max, self.max_vel, self.args.particle_dim)
                              for _ in range(self.args.particle_num)]
        for part in self.Particle_list:
            fit = self.get_fitness(part.get_pos())
            part.set_fitness_value(fit)
            for i in range(self.args.particle_dim):
                part.set_best_pos(i, part.get_pos()[i])
            if fit > self.best_fitness_value:
                self.best_fitness_value = fit
                self.best_position = part.get_pos()

    def get_fitness(self, particle):
        """Fitness of one particle position.

        Decodes the position into loss weights, trains a fresh GNNTalk
        model from scratch, and returns trainer()'s best validation
        fitness (expensive: one full training run per call).
        """
        loss_selection = self.hybrid_search_space.get_instance_pso(particle)
        reset_pso_parameters(self.args, loss_selection)
        print('============================\n w1={}, w2={}, w3={}, w4={}'.format(self.args.w1, self.args.w2, self.args.w3, self.args.w4))

        model = GNNTalk(self.args)
        print("model parameters: ", count_parameters(model))

        # Training requires a GPU.
        assert torch.cuda.is_available()
        model = model.to(self.args.device)

        dataset = get_dataloaders(self.args)
        criterion = nn.MSELoss()

        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=self.args.lr)
        _, fit = trainer(self.args, dataset["train"], dataset["valid"], model, optimizer, criterion,
                         epoch=self.args.max_epoch, last_train=self.args.last_train)

        return fit

    def update_vel(self, part):
        """Standard PSO velocity update with inertia, clamped per dim.

        v_i = W * v_i + c1 * r1 * (pBest_i - x_i) + c2 * r2 * (gBest_i - x_i)

        BUGFIX: the inertia term W * v was computed (as `ww`) but never
        added to the new velocity; it is now included.
        """
        for i in range(self.args.particle_dim):
            inertia = self.args.W * part.get_vel()[i]
            vel_value = inertia + self.args.c1 * random.random() * (
                    part.get_best_pos()[i] - part.get_pos()[i]) + self.args.c2 * random.random() * (
                                self.best_position[i] - part.get_pos()[i])
            # Clamp to [-max_vel, max_vel].
            if vel_value > self.max_vel[i]:
                vel_value = self.max_vel[i]
            elif vel_value < -self.max_vel[i]:
                vel_value = -self.max_vel[i]
            part.set_vel(i, vel_value)

    def update_pos(self, part):
        """Move a particle on the integer grid according to its velocity,
        wrap out-of-range positions, re-evaluate fitness, and update the
        particle's pBest and the swarm's gBest.
        """
        for i in range(self.args.particle_dim):
            vel = part.get_vel()[i]
            if (vel > (self.x_max[i] / 1000)) and (vel <= 1):
                pos_value = part.get_pos()[i] + 1   # small positive step
            elif (vel >= -1) and (vel < (-self.x_max[i] / 1000)):
                pos_value = part.get_pos()[i] - 1   # small negative step
            elif (vel > 1) and (vel <= self.max_vel[i]):
                pos_value = part.get_pos()[i] + math.ceil(vel)
            elif (vel < -1) and (vel >= -self.max_vel[i]):
                # BUGFIX: was `pos - math.floor(vel)`, which moved the
                # particle in the *positive* direction for a negative
                # velocity; now steps downward, symmetric with the
                # math.ceil branch above.
                pos_value = part.get_pos()[i] + math.floor(vel)
            else:
                pos_value = part.get_pos()[i]  # negligible velocity: stay put

            # Wrap around the search-space bounds.
            if pos_value > self.x_max[i]:
                pos_value = pos_value % (self.x_max[i] + 1)
            elif pos_value < self.x_min[i]:
                pos_value = pos_value + self.x_max[i] + 1

            part.set_pos(i, pos_value)
        fitness = self.get_fitness(part.get_pos())
        if fitness > part.get_fitness_value():
            part.set_fitness_value(fitness)
            for i in range(self.args.particle_dim):
                part.set_best_pos(i, part.get_pos()[i])
        if fitness > self.best_fitness_value:
            self.best_fitness_value = fitness
            self.best_position = part.get_pos()

    def update(self):
        """Run the main PSO loop for args.iterations iterations.

        When the swarm's diversity collapses (average pBest-to-gBest
        distance below a fixed threshold), chaotic perturbations are
        applied to gBest and to stagnant pBest dimensions.

        Returns:
            (fitness_val_list, best_position): per-iteration best
            fitness and the final global best position.
        """
        avgofdimension = [0.0 for _ in range(self.args.particle_dim)]
        pm = 0.45      # perturbation probability threshold
        exnon = 0.01   # perturbation noise floor
        x = 0.6        # logistic-map chaotic state
        pdavg = [0.0 for _ in range(self.args.particle_dim)]
        variance = [0.0 for _ in range(self.args.particle_dim)]

        for i in range(self.args.iterations):

            # delta(pBest, gBest): average Euclidean distance between
            # each particle's pBest and the global best (Eq. 10).
            avgofdistance = 0.0
            for part in self.Particle_list:
                dist = 0
                for dim in range(self.args.particle_dim):
                    dist += math.pow((self.best_position[dim] - part.get_best_pos()[dim]), 2)
                avgofdistance += math.sqrt(dist)
            avgofdistance /= self.args.particle_num

            if avgofdistance < 4:  # diversity threshold delta_d

                # Per-dimension dispersion of gBest over past
                # iterations (Eq. 12).
                # NOTE(review): genGlobalBest is never filled in, so
                # this is always 0 and the perturbation below always
                # fires when avgofdistance < 4 -- confirm intent.
                for dim in range(self.args.particle_dim):
                    avgofdimension[dim] = 0.0
                    for j in range(i + 1):
                        avgofdimension[dim] += math.pow((self.genGlobalBest[j][dim] - self.genGlobalBest[i][dim]), 2)
                    avgofdimension[dim] = math.sqrt(avgofdimension[dim] / (i + 1))

                # gBest perturbation per stagnant dimension.
                for dim in range(self.args.particle_dim):
                    if avgofdimension[dim] < 0.6:
                        x = 4 * x * (1 - x)  # logistic map: chaotic sequence
                        r6 = random.random()
                        r1 = 32767
                        if exnon < pm - r6:
                            # Chaotic jump within the dimension's range.
                            timesequence = int(x * r1) % (self.x_max[dim])
                            self.best_position[dim] = (self.best_position[dim] + timesequence) % (self.x_max[dim])
                            print('第' + str(dim) + '维进行gBest扰动操作')
                        elif r6 > pm:
                            # Random backward jump, wrapped into range.
                            self.best_position[dim] = self.best_position[dim] - random.randint(0, self.x_max[dim])
                            if self.best_position[dim] < self.x_min[dim]:
                                self.best_position[dim] = self.best_position[dim] + self.x_max[dim]
                            print('第' + str(dim) + '维进行gBest扰动操作')

                # pBest perturbation: first the per-dimension mean of
                # all pBests ...
                for dim in range(self.args.particle_dim):
                    pdavg[dim] = 0.0
                    for part in self.Particle_list:
                        pdavg[dim] += part.get_best_pos()[dim]
                    pdavg[dim] /= self.args.particle_num

                # ... then the per-dimension standard deviation.
                for dim in range(self.args.particle_dim):
                    variance[dim] = 0.0
                    for part in self.Particle_list:
                        variance[dim] += math.pow((part.get_best_pos()[dim] - pdavg[dim]), 2)
                    variance[dim] = math.sqrt(variance[dim] / self.args.particle_num)

                # Perturb pBest on dimensions with low spread.
                for dim in range(self.args.particle_dim):
                    if variance[dim] < 0.9:
                        for part in self.Particle_list:
                            r3 = random.random()
                            if exnon < pm - r3:
                                part.set_best_pos(dim,
                                                  (part.get_best_pos()[dim] + random.randint(0, self.x_max[dim])) % (
                                                      self.x_max[dim]))
                                print(str(part) + '粒子的第' + str(dim) + '维进行pBest扰动操作')
                            elif r3 > pm:
                                part.set_best_pos(dim, part.get_best_pos()[dim] + self.x_max[dim])
                                print(str(part) + '粒子的第' + str(dim) + '维进行pBest扰动操作')

            # Standard PSO step for every particle.
            for part in self.Particle_list:
                self.update_vel(part)
                self.update_pos(part)
            self.fitness_val_list.append(self.best_fitness_value)
            print(f'fitness:{self.best_fitness_value:.4f}')
            print('第' + str(i + 1) + '轮iteration的pos:' + str(self.best_position))
        return self.fitness_val_list, self.best_position
