import json

import torch
import torch.nn as nn
import os
import numpy as np

from torch import optim
from torchvision import models
from torch.utils.data import DataLoader, Subset
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, recall_score

from .file_utils import *
from .config import *
from .log_util import Logger
from .resnet import ResNet34Custom
from .process_bar import ProcessBar

# Module-level logger; replaced by Trainer.__init__ with one that writes to
# the model's log path.
logger = Logger('')

# Layer-width presets for the siamese comparator network, keyed by the
# embedding width produced by FeatureExtractor's replacement fc layer.
net_presets = {
    '128': [128, 128, 64, 32],
    '256': [256, 256, 128, 64],
    '512': [512, 512, 256, 128],
    '1024': [1024, 1024, 512, 256]
}
# Key into net_presets; set globally by Trainer.__init__ before the
# networks are constructed (FeatureExtractor/SiameseNetwork read it in __init__).
global_net_preset = ''

# Training hyper-parameters; `trainPram` presumably comes from the
# star-import of .config — TODO confirm.
batchSize, epochs, learningRate, weightDecay, stepSize, gamma = trainPram['batchSize'], trainPram['epochs'], trainPram['learningRate'], trainPram['weightDecay'], trainPram['stepSize'], trainPram['gamma']


class FeatureExtractor(nn.Module):
    """Wraps ResNet34Custom as a fixed-width embedding extractor.

    The 1251-way classification head (presumably the VoxCeleb1 speaker
    count — TODO confirm) is replaced by a linear projection to the first
    width of the active preset.

    Args:
        origin_model_path: optional path to a pretrained ResNet state dict;
            empty string skips loading.
    """

    def __init__(self, origin_model_path=''):
        super(FeatureExtractor, self).__init__()
        self.resnet = ResNet34Custom(1251)
        # Load pretrained weights if a checkpoint path is given.
        # BUG FIX: map_location='cpu' so a GPU-saved checkpoint also loads on
        # CPU-only machines; Trainer moves the model to the target device
        # after construction, so loading onto CPU first is always safe.
        if origin_model_path != '':
            self.resnet.load_state_dict(
                torch.load(origin_model_path, map_location='cpu'))
        # Replace the output layer: project the 256-dim backbone feature
        # (inferred from `256 * 1` — TODO confirm against ResNet34Custom)
        # to the preset embedding width.
        preset = net_presets[global_net_preset]
        self.resnet.fc = nn.Linear(256 * 1, preset[0])

    def forward(self, x):
        return self.resnet(x)


class SiameseNetwork(nn.Module):
    """Compares two embedding vectors and outputs one similarity score in (0, 1).

    Both inputs pass through the same (shared-weight) comparator MLP; the
    element-wise absolute difference of the two outputs is mapped to a single
    sigmoid similarity score per pair.
    """

    def __init__(self):
        super(SiameseNetwork, self).__init__()
        preset = net_presets[global_net_preset]
        # Fully connected network used as the shared feature comparator.
        self.feature_comparator = nn.Sequential(
            nn.Linear(preset[0], preset[1]),
            nn.ReLU(),
            nn.Linear(preset[1], preset[2]),
            nn.ReLU(),
            nn.Dropout(0.2),  # Dropout between the second and third layers
            nn.Linear(preset[2], preset[3]),
            nn.ReLU(),
        )
        # Map the feature difference to a single similarity score.
        # BUG FIX: the original applied only nn.Sigmoid() to the
        # preset[3]-dimensional difference, so forward returned a
        # (batch, preset[3]) tensor; train_model feeds outputs.squeeze()
        # to nn.BCELoss against (batch,) labels, which raises on mismatched
        # sizes. Project to 1 dimension before the sigmoid so the network
        # emits one score per pair.
        self.similarity_estimator = nn.Sequential(
            nn.Linear(preset[3], 1),
            nn.Sigmoid()
        )

    def forward(self, feature1, feature2):
        # Process both feature vectors with the same shared network.
        out1 = self.feature_comparator(feature1)
        out2 = self.feature_comparator(feature2)

        # Element-wise absolute difference of the two comparator outputs.
        difference = torch.abs(out1 - out2)

        # Estimate similarity from the difference; shape (batch, 1).
        similarity = self.similarity_estimator(difference)
        return similarity


class EndToEndModel(nn.Module):
    """End-to-end pipeline: embed both inputs, then score their similarity."""

    def __init__(self, feature_extractor, siamese_network):
        super(EndToEndModel, self).__init__()
        self.feature_extractor = feature_extractor  # embedding network
        self.siamese_network = siamese_network      # similarity head

    def forward(self, input1, input2):
        # Run each input through the shared extractor, then compare the
        # two embeddings with the siamese head.
        emb_a = self.feature_extractor(input1)
        emb_b = self.feature_extractor(input2)
        return self.siamese_network(emb_a, emb_b)

    def predict(self, input1, input2, threshold=0.5):
        """Return a float 0/1 tensor: 1 where similarity exceeds `threshold`."""
        self.eval()
        target_device = next(self.parameters()).device
        with torch.no_grad():
            score = self(input1.to(target_device), input2.to(target_device))
            return (score > threshold).float()


def train_model(model, train_loader, val_loader, criterion, optimizer, scheduler, device, model_root,
                epochs=epochs, current_epoch=1):
    """Train `model` for the remaining epochs, evaluating EER after each one.

    Per epoch: one optimization pass over `train_loader`, a scheduler step,
    a 20-point threshold sweep over `val_loader` to estimate the Equal Error
    Rate, and a full-model checkpoint plus resume metadata under `model_root`.

    Args:
        model: module taking (input1, input2) and returning similarity scores.
        train_loader: yields (input1, input2, labels) batches for training.
        val_loader: yields (input1, input2, labels) batches for validation.
        criterion: loss applied to (outputs.squeeze(), labels); BCELoss in Trainer.
        optimizer: stepped once per batch.
        scheduler: LR scheduler, stepped once per epoch.
        device: torch device every batch is moved to.
        model_root: directory for checkpoints and model_info.json.
        epochs: total epoch count (defaults to the config value).
        current_epoch: 1-based epoch to start from (used when resuming).
    """
    rest_epoch = epochs - current_epoch + 1
    for _ in range(rest_epoch):
        sample_amount = len(train_loader)  # number of batches, not samples
        log_text = f'Epoch {current_epoch}/{epochs} starts, {sample_amount} batches'
        print(log_text)
        logger.log(log_text)
        model.train()
        running_loss = 0.0
        process_bar = ProcessBar(sample_amount)
        for index, (input1, input2, labels) in enumerate(train_loader):
            input1, input2, labels = input1.to(device), input2.to(device), labels.to(device).float()  # BCELoss needs float targets
            optimizer.zero_grad()
            outputs = model(input1, input2)  # sigmoid similarity scores in (0, 1), as BCELoss expects
            loss = criterion(outputs.squeeze(), labels)  # squeeze so output and labels shapes match
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            process_bar.print(index)
        scheduler.step()  # Update learning rate
        log_text = f'Epoch {current_epoch}/{epochs}, Loss: {running_loss / sample_amount}'
        print('\n', log_text)
        logger.log(log_text)

        # Validate: sweep decision thresholds to estimate the Equal Error Rate.
        # NOTE(review): the header literal says 'accuracy', but the value
        # logged below is the EER percentage at the EER threshold.
        log_text = 'Validation results:\nthreshold\taccuracy'
        print(log_text)
        logger.log(log_text)

        model.eval()
        thresholds = np.linspace(0, 1, 20)  # 20 thresholds from 0 to 1
        far_record = []  # false-acceptance rate (%) per threshold
        frr_record = []  # false-rejection rate (%) per threshold

        process_bar.init(len(thresholds))

        for index, threshold in enumerate(thresholds):
            fa = 0  # false acceptances (predicted 1, label 0)
            fr = 0  # false rejections (predicted 0, label 1)
            total_accept = 0  # ground-truth positives
            total_reject = 0  # ground-truth negatives

            with torch.no_grad():
                for input1, input2, labels in val_loader:
                    input1, input2, labels = input1.to(device), input2.to(device), labels.to(device).to(torch.int64)
                    outputs = model(input1, input2).squeeze()  # raw similarity scores; thresholded here rather than via model.predict
                    predicted = outputs > threshold

                    fa += ((predicted == 1) & (labels == 0)).int().sum().item()
                    fr += ((predicted == 0) & (labels == 1)).int().sum().item()
                    total_accept += (labels == 1).int().sum().item()
                    total_reject += (labels == 0).int().sum().item()

            # Guard against empty classes in the validation set.
            far = 100.0 * fa / total_reject if total_reject else 0
            frr = 100.0 * fr / total_accept if total_accept else 0

            far_record.append(far)
            frr_record.append(frr)

            process_bar.print(index)

        # Convert to numpy arrays for vectorized EER computation.
        far = np.array(far_record)
        frr = np.array(frr_record)

        # The EER is taken where |FAR - FRR| is smallest.
        idx = np.argmin(np.abs(far - frr))
        eer_threshold = thresholds[idx]
        eer = (far[idx] + frr[idx]) / 2

        log_text = f'{eer_threshold:.4f}\t{eer:.2f}%'
        print(log_text)
        logger.log(log_text)

        # Checkpoint after each epoch. NOTE(review): torch.save(model, ...)
        # pickles the whole module; Trainer's resume path relies on this format.
        if not os.path.exists(model_root):
            os.makedirs(model_root)
        model_path = os.path.join(get_model_path(model_root), f'model_{current_epoch}')
        torch.save(model, model_path)
        print(f'Model saved to {model_path}')
        # Record the last finished epoch so training can resume later.
        info = {'finished_epoch': current_epoch}
        with open(os.path.join(model_root, 'model_info.json'), 'w') as f:
            json.dump(info, f)

        current_epoch += 1


class Trainer:
    """Owns model construction/resumption and drives train_model.

    Args:
        model_root: directory for checkpoints, logs and model_info.json.
        train_loader: DataLoader yielding (input1, input2, label) batches.
        test_loader: DataLoader used for validation / EER evaluation.
        pretrained_resnet_path: optional ResNet state-dict checkpoint for
            the feature extractor; empty string skips loading.
        preset: key into net_presets selecting the network widths.
        resume: when True, reload the last saved model and continue training.
    """

    def __init__(self, model_root, train_loader, test_loader, pretrained_resnet_path='', preset='1024', resume=False):
        self.resume = resume
        self.pretrained_resnet_path = pretrained_resnet_path
        self.model_root = model_root
        self.current_epoch = 1
        self.train_loader = train_loader
        self.test_loader = test_loader

        # Select the network preset globally, since FeatureExtractor and
        # SiameseNetwork read global_net_preset at construction time.
        global global_net_preset
        global_net_preset = preset

        # Re-initialize the module-level logger with the model's log path.
        global logger
        logger = Logger(get_model_log_path(model_root))

    def train(self):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Training on {device}")

        model_path = get_model_path(self.model_root)
        if self.resume:
            if os.path.exists(model_path):
                # BUG FIX: map_location lets a GPU-saved checkpoint resume on a
                # CPU-only machine (and vice versa) instead of failing to load.
                model = torch.load(model_path, map_location=device)
                print("Resuming model from", model_path)
                info_path = get_model_info_path(self.model_root)
                with open(info_path, 'r') as f:
                    info = json.load(f)
                self.current_epoch = info['finished_epoch'] + 1
                print("Resuming training at epoch", self.current_epoch)
            else:
                raise FileNotFoundError('模型不存在')
        else:
            feature_extractor = FeatureExtractor(self.pretrained_resnet_path)
            siamese_network = SiameseNetwork()
            model = EndToEndModel(feature_extractor, siamese_network)

        # BUG FIX: move the model to the target device in BOTH branches; the
        # original only did this for freshly built models, so a resumed model
        # could sit on a different device than the training batches.
        model.to(device)

        # Loss, optimizer and LR schedule (hyper-parameters from config).
        criterion = nn.BCELoss()
        optimizer = optim.Adam(model.parameters(), lr=learningRate, weight_decay=weightDecay)
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=stepSize, gamma=gamma)

        train_model(model, self.train_loader, self.test_loader, criterion, optimizer,
                    scheduler, device, self.model_root, current_epoch=self.current_epoch)
