import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
# from bayes_opt import BayesianOptimization
from prepare_data import *
import torch.nn.functional as F
from evaluate_func import *

# torch.cuda.set_device(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SEED = 0
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)

def data_count(score):
    """Return (L2-normalized scores as a list, mean, variance) of *score*."""
    # Normalize along dim 0 so the scores form a unit vector, then
    # hand back a plain Python list.
    unit_scores = F.normalize(torch.tensor(score), dim=0).tolist()
    arr = np.array(score)
    return unit_scores, np.mean(arr), np.var(arr)
def train(model, train_data, train_dict, opt, epoch, loss_func1, loss_func2, optimizer):
    """Run one training epoch and return (model, mean per-batch loss).

    Args:
        model: network returning a (score, score2) pair for a batch of images.
        train_data: iterable yielding (img, labels, last_label, names) batches.
        train_dict, opt, epoch: unused here; kept for interface compatibility.
        loss_func1, loss_func2: loss callables applied to (score, labels).
        optimizer: torch optimizer over model's parameters.

    Returns:
        (model, epoch_loss): the (mutated) model and the mean batch loss.
    """
    epoch_loss, n_batches = 0, 0  # renamed from `iter`, which shadowed the builtin
    paint_to_course, course_to_dimension, dimension_to_idx = get_all_reflect_relation()
    for img, labels, last_label, names in train_data:
        n_batches += 1
        img = img.to(device)
        # Use the module-level `device` (was `.cuda()`) so this also runs
        # on CPU-only machines.
        labels = torch.tensor(labels, dtype=torch.float32).to(device)
        score, score2 = model(img)
        # For each sample, find the index (within its course's dimensions)
        # of the smallest label value.
        label_rows = labels.tolist()  # hoisted: was converted once per sample
        true_min_label = []
        for idx, row in enumerate(label_rows):
            dims = course_to_dimension[paint_to_course[names[idx]]]
            dim_scores = [row[dimension_to_idx[va]] for va in dims]
            true_min_label.append(dim_scores.index(min(dim_scores)))
        loss1 = loss_func1(score, labels)
        true_min_label = torch.tensor(true_min_label).to(device)
        # NOTE(review): loss2 is computed on (score, labels), not on
        # (score2, true_min_label) as the commented-out original suggests,
        # so true_min_label is currently unused — confirm this is intentional.
        loss2 = loss_func2(score, labels)
        loss = loss1 + loss2
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    epoch_loss /= n_batches  # raises ZeroDivisionError on an empty loader, as before
    print("loss is", epoch_loss)
    return model, epoch_loss


# def evaluate(score, label):
#     score, label = torch.tensor(score), torch.tensor(label)
#     mse = torch.mean(torch.abs(score - label) ** 2)
#     mae = torch.mean(torch.abs(score - label))
#     rmse = np.sqrt(mse)
#     # print(" mae:", mae, "mse:", mse, "rmse", rmse)
#     return mae, mse, rmse

def cal_r2_score(y_pred, y_true):
    """Coefficient of determination: R^2 = 1 - SS_res / SS_tot."""
    residual = y_true - y_pred
    deviation = y_true - torch.mean(y_true)
    return 1 - torch.sum(residual ** 2) / torch.sum(deviation ** 2)

def cal_pearson_correlation(y_pred, y_true):
    """Pearson correlation coefficient between predictions and targets."""
    dp = y_pred - torch.mean(y_pred)
    dt = y_true - torch.mean(y_true)
    covariance = torch.sum(dp * dt)
    scale = torch.sqrt(torch.sum(dp ** 2) * torch.sum(dt ** 2))
    return covariance / scale

# def cal_mape(y_pred, y_true):
#     print(f'y_pred is {y_pred}')
#     print(f'y_true is {y_true}')
#     return torch.mean(torch.abs((y_true - y_pred) / y_true)) * 100 

def cal_mape(y_pred, y_true):
    """Mean absolute percentage error (%), skipping zero targets."""
    nonzero = y_true != 0  # drop entries that would divide by zero
    pred = y_pred[nonzero]
    true = y_true[nonzero]
    return 100 * torch.mean(torch.abs((true - pred) / true))


# def cal_mpe(y_pred, y_true):
#     return torch.mean((y_true - y_pred) / y_true) * 100  # 百分误差

def cal_mpe(y_pred, y_true):
    """Mean signed percentage error (%), skipping zero targets."""
    keep = y_true != 0  # drop entries that would divide by zero
    pred = y_pred[keep]
    true = y_true[keep]
    return torch.mean((true - pred) / true) * 100



def evaluate(score, label):
    """Compute regression metrics between predictions and ground truth.

    Args:
        score: predicted values (sequence or tensor).
        label: ground-truth values, same length as *score*.

    Returns:
        (mae, mse, rmse, r2, pearson, mape, mpe) as torch scalars.
    """
    # as_tensor avoids the copy/warning when the inputs are already tensors,
    # and the float32 dtype keeps torch.mean valid for integer inputs too.
    score = torch.as_tensor(score, dtype=torch.float32)
    label = torch.as_tensor(label, dtype=torch.float32)
    diff = score - label
    mse = torch.mean(diff ** 2)
    mae = torch.mean(torch.abs(diff))
    rmse = torch.sqrt(mse)  # was np.sqrt on a torch tensor; stay in torch
    r2 = cal_r2_score(score, label)
    pea = cal_pearson_correlation(score, label)
    mape = cal_mape(score, label)
    mpe = cal_mpe(score, label)
    return mae, mse, rmse, r2, pea, mape, mpe



def test(model, test_data):
    """Evaluate the recommendation quality of the model's second head.

    For every sample, the ground truth is the index of the smallest label
    value within that sample's course dimensions; the recommendation is the
    model's top-20 score2 indices. The two lists are scored with cal_result.

    Args:
        model: network returning (score, score2) for a batch of images.
        test_data: iterable yielding (img, labels, last_label, names) batches.

    Returns:
        whatever cal_result(true_list, rec_list) returns.
    """
    # NOTE(review): model.eval() / torch.no_grad() are not used here —
    # confirm whether dropout/batch-norm layers matter for this model.
    paint_to_course, course_to_dimension, dimension_to_idx = get_all_reflect_relation()
    true_list1, rec_list1 = [], []  # removed unused true_list2 / rec_list2
    for img, labels, last_label, names in test_data:
        img = img.to(device)
        labels = labels.to(device)
        score, score2 = model(img)
        label_rows = labels.tolist()  # hoisted: was recomputed per sample
        true_min_label = []
        for idx, row in enumerate(label_rows):
            # removed dead locals t1/t1 that recomputed this lookup unused
            dims = course_to_dimension[paint_to_course[names[idx]]]
            dim_scores = [row[dimension_to_idx[va]] for va in dims]
            true_min_label.append([dim_scores.index(min(dim_scores))])
        true_list1 += true_min_label
        _, top20 = torch.topk(score2, k=20)  # indices of the 20 largest score2 entries
        rec_list1 += top20.tolist()
    return cal_result(true_list1, rec_list1)


def StrengthenData(train_set, train_dict):
    """Augment the training set so per-class counts round up to multiples of 62.

    Args:
        train_set: sequence of samples; each item looks like
            [image, label0, ..., last_label] (item[0] is the image,
            item[1:] the labels).
        train_dict: dict whose keys 0-10 map to per-class sample lists;
            only the list lengths are read here.

    Returns:
        (train_set, train_dict): the rebuilt sample list and a new per-class
        dict keyed 0-6 holding [image, first_label, last_label] triples.
    """
    ################################ data augmentation #################################
    train_data = [i[0] for i in train_set]
    train_labels = [i[1:] for i in train_set]
    # Random flip/rotation used to synthesize extra samples.
    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(20)
    ])
    # Original per-class sizes (keys 0-10); label_counts tracks growth as
    # augmented copies are added.
    train_data_len, label_counts = {}, {}
    for i in range(11):
        train_data_len[i], label_counts[i] = len(train_dict[i]), len(train_dict[i])
    # Rebuild train_dict with 7 buckets; bucket i maps to original key i + 3
    # below — presumably classes 0-2 are not augmented. TODO confirm.
    train_dict = {}
    for i in range(7):
        train_dict[i] = []
    # range(len(train_data)) is evaluated once, so samples appended during
    # the loop are not themselves re-augmented.
    for i in range(len(train_data)):
        x, y = train_data[i], train_labels[i]
        va = y[-1].tolist()  # last label vector; its argmax picks the bucket
        idx = va.index(max(va))
        train_dict[idx].append([x, y[0], y[-1]])
        # Extra samples still needed to round class (idx + 3) up to the next
        # multiple of 62.
        need_count = int((train_data_len[idx + 3] + 61) / 62) * 62 - label_counts[idx + 3]
        if need_count:
            x_add = transform(x)  # randomly transformed copy of the image
            train_data = train_data + [x_add]
            train_labels = train_labels + [y]
            label_counts[idx + 3] += 1
            train_dict[idx].append([x_add, y[0], y[-1]])
    # NOTE(review): this zip pairs every image with train_labels[0] and
    # train_labels[1] — the label tuples of the FIRST TWO samples — and
    # truncates to the shortest; that looks unintended. Confirm whether the
    # rebuild should zip element-wise across all label tuples instead.
    train_set = [list(x) for x in zip(train_data, train_labels[0], train_labels[1])]
    return train_set, train_dict
    ################################ data augmentation #################################


# def BayesianOpt():
#     ##############贝叶斯调参###############################
#     def block_box_function(num_layers, num_heads, mlp_ratio, Kernel_size1, Stride1, Kernel_size2, Stride2,
#                            num_layers_last, num_heads_last, lr, l2):
#         Kernel_size1 = [1, 2, 4, 8][int(Kernel_size1)]
#         Kernel_size2 = [1, 2, 4, 8][int(Kernel_size2)]
#         Stride1 = [1, 2, 4, 8][int(Stride1)]
#         Stride2 = [1, 2, 4, 8][int(Stride2)]
#         num_layers = int(num_layers)
#         num_heads = [2, 4, 6, 8, 12, 16, 24][int(num_heads)]
#         num_heads_last = [2, 4, 6, 8, 12, 16, 24][int(num_heads_last)]
#         mlp_ratio = float(mlp_ratio)
#         num_layers_last = int(num_layers_last)
#         res = 0
#         embedding_dim = int((256 - Kernel_size1) // Stride1 + 1)
#         embedding_dim = int((embedding_dim - 2) // 2 + 1)
#         embedding_dim = int((embedding_dim - Kernel_size2 + 2) // Stride2 + 1)
#         embedding_dim = int((embedding_dim - 2) // 2 + 1)
#         embedding_dim = int(embedding_dim * embedding_dim * 3)
#         if embedding_dim % num_heads != 0:
#             num_heads = 3
#         if embedding_dim % num_heads_last != 0:
#             num_heads_last = 3
#         print(f"embedding_dim  is {embedding_dim}, num_heads is {num_heads}")
#
#         model = AesModel(embedding_dim=embedding_dim, num_layers=num_layers, num_heads=num_heads,
#                          mlp_ratio=float(mlp_ratio), Kernel_size1=Kernel_size1, Stride1=Stride1,
#                          Kernel_size2=Kernel_size2, Stride2=Stride2, num_layers_last=num_layers_last,
#                          num_heads_last=num_heads_last).cuda()
#         optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=l2)
#         loss_func1 = nn.BCEWithLogitsLoss()
#         loss_func2 = nn.BCEWithLogitsLoss()
#         for epoch in range(opt.epoch):
#             model = train(model, train_data, optimizer=optimizer, loss_func1=loss_func1, loss_func2=loss_func2)
#             res = res + test(model, train_data)
#         res = res / opt.epoch
#         return res
#
#     pbounds = {'num_layers': (2, 10),
#                'num_layers_last': (2, 10),
#                'num_heads': (0, 6),
#                'num_heads_last': (0, 6),
#                'mlp_ratio': (0.0, 6.0),
#                'Kernel_size1': (0, 4),
#                'Kernel_size2': (0, 4),
#                'Stride1': (0, 4),
#                'Stride2': (0, 4),
#                'lr': (1e-5, 1e-2),
#                'l2': (1e-5, 1e-2),
#                }
#     Optimizer = BayesianOptimization(f=block_box_function,
#                                      pbounds=pbounds,
#                                      verbose=2,
#                                      random_state=1)
#
#     Optimizer.maximize(init_points=10, n_iter=30)
#     print(Optimizer.max)
#     #############贝叶斯调参###############################

