import random
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from math import ceil
import sys
import torch.optim as optim
import pandas as pd
import os
import warnings

# Suppress all warnings (pandas/torch emit noisy deprecation warnings during training)
warnings.filterwarnings("ignore")

# Per-amino-acid synthesis difficulty coefficients (higher = harder to synthesize)
SYNTH_DIFFICULTY = {
    'A': 1.0, 'R': 1.8, 'N': 1.5, 'D': 1.5, 'C': 1.7,
    'E': 1.6, 'Q': 1.6, 'G': 1.0, 'H': 1.7, 'I': 1.4,
    'L': 1.4, 'K': 1.5, 'M': 1.6, 'F': 1.8, 'P': 1.3,
    'S': 1.2, 'T': 1.3, 'W': 2.0, 'Y': 1.8, 'V': 1.3
}

MAX_SEQ_LEN = 18
# NOTE(review): assumes AMPdb_data.csv has one header row and its first three
# columns are (ID, sequence, length) — confirm against the actual file.
data = pd.read_csv('AMPdb_data.csv', skiprows=1, usecols=range(3), header=None, names=['ID', 'seq', 'len'])
all_sequences = np.asarray(data['seq'])

# Amino-acid letter -> integer token (1-24). Index 0 is used as the start
# token (START_LETTER) and 25 as padding/unknown in sequence_to_vector.
CHARACTER_DICT = {
    'A': 1, 'C': 2, 'E': 3, 'D': 4, 'F': 5, 'I': 6, 'H': 7,
    'K': 8, 'M': 9, 'L': 10, 'N': 11, 'Q': 12, 'P': 13, 'S': 14,
    'R': 15, 'T': 16, 'W': 17, 'V': 18, 'Y': 19, 'G': 20, 'O': 21, 'U': 22, 'Z': 23, 'X': 24}
# Inverse mapping: integer token -> amino-acid letter (used when decoding samples).
INDEX_DICT = {
    1: 'A', 2: 'C', 3: 'E', 4: 'D', 5: 'F', 6: 'I', 7: 'H',
    8: 'K', 9: 'M', 10: 'L', 11: 'N', 12: 'Q', 13: 'P', 14: 'S',
    15: 'R', 16: 'T', 17: 'W', 18: 'V', 19: 'Y', 20: 'G', 21: 'O', 22: 'U', 23: 'Z', 24: 'X'}


def sequence_to_vector(sequence):
    """Encode an amino-acid string as a fixed-length integer vector.

    Tokens come from CHARACTER_DICT; unused trailing positions and unknown
    characters both map to the padding index 25. Sequences longer than
    MAX_SEQ_LEN are truncated.
    """
    encoded = np.full(MAX_SEQ_LEN, 25)
    for position, residue in enumerate(sequence[:MAX_SEQ_LEN]):
        encoded[position] = CHARACTER_DICT.get(residue, 25)
    return encoded.astype(int)


def vector_to_sequence(vector):
    """Decode an index vector back to an amino-acid string.

    Indices without an INDEX_DICT entry (start token 0, padding 25, etc.)
    are silently skipped.
    """
    letters = []
    for index in vector:
        if index in INDEX_DICT:
            letters.append(INDEX_DICT[index])
    return ''.join(letters)


def calculate_synth_difficulty(sequence):
    """Return the mean per-residue synthesis difficulty of `sequence`.

    Unknown residues get a neutral difficulty of 1.5. An empty sequence
    returns 0.0 (the original implementation raised ZeroDivisionError).
    """
    if not sequence:
        return 0.0
    total = sum(SYNTH_DIFFICULTY.get(aa, 1.5) for aa in sequence)
    return total / len(sequence)


# Pre-encode every dataset sequence into its fixed-length index vector.
all_data = [sequence_to_vector(seq) for seq in all_sequences]


class Generator(nn.Module):
    """Autoregressive GRU language model over peptide token sequences.

    Pretrained by MLE on real sequences (batchNLLLoss), then fine-tuned with
    SeqGAN-style policy gradients (batchPGLoss) using discriminator rewards.
    """

    def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, gpu=False, oracle_init=False):
        super(Generator, self).__init__()
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.gpu = gpu

        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.gru = nn.GRU(embedding_dim, hidden_dim)
        self.gru2out = nn.Linear(hidden_dim, vocab_size)

        if oracle_init:
            # N(0, 1) init is used when this network acts as a fixed "oracle".
            for p in self.parameters():
                nn.init.normal_(p, 0, 1)

    def init_hidden(self, batch_size=1):
        """Return a zeroed hidden state of shape (1, batch, hidden)."""
        h = torch.zeros(1, batch_size, self.hidden_dim)
        return h.cuda() if self.gpu else h

    def forward(self, inp, hidden):
        """Run one decoding step.

        inp: (batch,) token indices; hidden: (1, batch, hidden).
        Returns (log-probabilities of shape (batch, vocab), new hidden).
        """
        emb = self.embeddings(inp)
        emb = emb.view(1, -1, self.embedding_dim)  # seq_len=1 for stepwise decoding
        out, hidden = self.gru(emb, hidden)
        out = self.gru2out(out.view(-1, self.hidden_dim))
        out = F.log_softmax(out, dim=1)
        return out, hidden

    def sample(self, num_samples, start_letter=0):
        """Sample `num_samples` sequences of length max_seq_len.

        Returns (samples, samples_p): sampled token indices and, per position,
        the maximum token probability at that step (a confidence proxy).
        """
        samples = torch.zeros(num_samples, self.max_seq_len, dtype=torch.long)
        samples_p = torch.zeros(num_samples, self.max_seq_len, dtype=torch.float)

        h = self.init_hidden(num_samples)
        inp = torch.LongTensor([start_letter] * num_samples)

        if self.gpu:
            samples = samples.cuda()
            # Keep both output tensors on the same device as the computation.
            samples_p = samples_p.cuda()
            inp = inp.cuda()

        for i in range(self.max_seq_len):
            out, h = self.forward(inp, h)
            probs = torch.exp(out)
            out_p, _ = torch.max(probs, dim=1)              # top token probability
            tokens = torch.multinomial(probs, 1).view(-1)   # sample next token
            samples_p[:, i] = out_p
            samples[:, i] = tokens.detach()
            inp = tokens
        return samples, samples_p

    def batchNLLLoss(self, inp, target):
        """Summed per-step NLL of `target` given teacher-forced `inp`.

        inp/target: (batch, seq_len) long tensors.
        """
        loss_fn = nn.NLLLoss()
        batch_size, seq_len = inp.size()
        inp = inp.permute(1, 0)       # (seq_len, batch) for stepwise decoding
        target = target.permute(1, 0)
        h = self.init_hidden(batch_size)

        loss = 0
        for i in range(seq_len):
            out, h = self.forward(inp[i], h)
            loss += loss_fn(out, target[i])
        return loss

    def batchPGLoss(self, inp, target, rewards):
        """REINFORCE-style policy-gradient loss, averaged over the batch.

        rewards: (batch,) per-sequence rewards weighting the log-probability
        of each taken action. Vectorized with gather — mathematically the
        same sum as the original O(batch) Python inner loop, but computed
        in one tensor op per step.
        """
        batch_size, seq_len = inp.size()
        inp = inp.permute(1, 0)
        target = target.permute(1, 0)
        h = self.init_hidden(batch_size)

        loss = 0
        for i in range(seq_len):
            out, h = self.forward(inp[i], h)
            # log-prob of the action actually taken, per batch element
            chosen = out.gather(1, target[i].unsqueeze(1)).squeeze(1)
            loss += -(chosen * rewards).sum()
        return loss / batch_size


class MultiObjectiveDiscriminator(nn.Module):
    """Bidirectional-GRU discriminator with three task heads.

    A shared recurrent encoder and dense layer feed three sigmoid heads that
    score each sequence for antimicrobial activity, toxicity, and synthesis
    difficulty, each in [0, 1].
    """

    def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, gpu=False, dropout=0.2):
        super(MultiObjectiveDiscriminator, self).__init__()
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
        self.max_seq_len = max_seq_len
        self.gpu = gpu

        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        # 2 layers x 2 directions -> 4 final hidden slices per sequence.
        self.gru = nn.GRU(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=dropout)

        # Shared feature extractor over the concatenated final hidden states.
        self.shared_layer = nn.Sequential(
            nn.Linear(2 * 2 * hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # One prediction head per objective.
        self.amp_head = nn.Linear(hidden_dim, 1)    # antimicrobial activity
        self.tox_head = nn.Linear(hidden_dim, 1)    # toxicity
        self.synth_head = nn.Linear(hidden_dim, 1)  # synthesis difficulty

    def init_hidden(self, batch_size):
        """Zero hidden state of shape (layers * directions, batch, hidden)."""
        state = torch.zeros(2 * 2 * 1, batch_size, self.hidden_dim)
        if self.gpu:
            state = state.cuda()
        return state

    def forward(self, input, hidden):
        embedded = self.embeddings(input)      # (batch, seq, emb)
        embedded = embedded.permute(1, 0, 2)   # GRU expects (seq, batch, emb)
        _, hidden = self.gru(embedded, hidden)
        # (layers*dirs, batch, hidden) -> (batch, layers*dirs*hidden)
        hidden = hidden.permute(1, 0, 2).contiguous()
        shared_features = self.shared_layer(hidden.view(-1, 4 * self.hidden_dim))

        return (
            torch.sigmoid(self.amp_head(shared_features)),
            torch.sigmoid(self.tox_head(shared_features)),
            torch.sigmoid(self.synth_head(shared_features)),
        )

    def batchClassify(self, inp):
        """Score a (batch, seq_len) batch; returns three flat (batch,) tensors."""
        hidden = self.init_hidden(inp.size()[0])
        amp, tox, synth = self.forward(inp, hidden)
        return amp.view(-1), tox.view(-1), synth.view(-1)


def prepare_generator_batch(samples, start_letter=0, gpu=False):
    """Build teacher-forcing (input, target) pairs from sampled sequences.

    The input is the target shifted right by one position with `start_letter`
    prepended; the target is a copy of `samples`.
    """
    batch_size, seq_len = samples.size()
    target = samples.clone()
    inp = torch.zeros(batch_size, seq_len, dtype=torch.long)
    inp[:, 0] = start_letter
    inp[:, 1:] = target[:, :seq_len - 1]
    if gpu:
        inp = inp.cuda()
    return inp, target


def prepare_discriminator_data(pos_samples, neg_samples, gpu=False):
    """Concatenate real (label 1) and generated (label 0) samples, shuffled.

    Returns (inp, target) where target[i] is the label of row inp[i].
    """
    num_pos = pos_samples.size(0)
    inp = torch.cat((pos_samples, neg_samples), 0)
    target = torch.ones(num_pos + neg_samples.size(0))
    target[num_pos:] = 0

    # Shuffle so the discriminator never sees label-ordered batches.
    shuffle = torch.randperm(target.size(0))
    inp, target = inp[shuffle], target[shuffle]

    if gpu:
        inp, target = inp.cuda(), target.cuda()
    return inp, target


def batchwise_sample(gen, num_samples, batch_size):
    """Draw `num_samples` sequences from `gen`, chunked by `batch_size`.

    The final chunk is shrunk so exactly `num_samples` rows are returned.
    """
    collected = []
    remaining = num_samples
    while remaining > 0:
        chunk, _ = gen.sample(min(batch_size, remaining))
        collected.append(chunk)
        remaining -= batch_size
    return torch.cat(collected)


def multi_objective_reward(amp_score, tox_score, synth_score):
    """Scalarize the three objectives into one reward.

    Rewards antimicrobial activity, penalizes toxicity (weight 0.5) and
    synthesis difficulty (weight 0.3).
    """
    reward = amp_score
    reward = reward - 0.5 * tox_score
    reward = reward - 0.3 * synth_score
    return reward


def calculate_hypervolume(pareto_front):
    """Simplified hypervolume indicator for a 3-objective front.

    Points are (amp, tox, synth) triples, sorted by the first objective;
    consecutive points contribute width * (1 - worse of the two penalties).
    Fronts with fewer than two points score 0.0.
    NOTE(review): this is a rough approximation, not an exact 3D hypervolume.
    """
    if len(pareto_front) < 2:
        return 0.0

    ordered = sorted(pareto_front, key=lambda point: point[0])

    total = 0.0
    prev_x, prev_y, prev_z = ordered[0]
    for x, y, z in ordered[1:]:
        total += (x - prev_x) * (1 - max(prev_y, prev_z))
        prev_x, prev_y, prev_z = x, y, z
    return total


def visualize_pareto(pareto_front):
    """Scatter the 3-objective Pareto front in 3D and save pareto_front.png.

    Skips plotting (with a console message) when matplotlib is unavailable.
    The whole body stays inside the try so any ImportError raised while
    matplotlib sets up its backend is also handled.
    """
    try:
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection

        figure = plt.figure(figsize=(10, 8))
        axes = figure.add_subplot(111, projection='3d')

        # One coordinate list per objective.
        amp_vals = [point[0] for point in pareto_front]    # antimicrobial activity
        tox_vals = [point[1] for point in pareto_front]    # toxicity
        synth_vals = [point[2] for point in pareto_front]  # synthesis difficulty

        axes.scatter(amp_vals, tox_vals, synth_vals, c='r', marker='o')
        axes.set_xlabel('Antimicrobial Activity')
        axes.set_ylabel('Toxicity')
        axes.set_zlabel('Synthetic Difficulty')
        plt.title('Pareto Front of Generated Peptides')
        plt.savefig('pareto_front.png')
        print("帕累托前沿图已保存到 pareto_front.png")
    except ImportError:
        print("无法导入matplotlib，无法可视化帕累托前沿")


class ParetoOptimizer:
    """Couples the generator and discriminator for multi-objective training.

    Computes weighted scalar rewards for policy-gradient updates and
    maintains an approximate Pareto front of generated peptides plus a
    history of its hypervolume.
    """

    def __init__(self, gen, dis, lambda_amp=1.0, lambda_tox=0.7, lambda_synth=0.3):
        self.gen = gen
        self.dis = dis
        self.lambda_amp = lambda_amp
        self.lambda_tox = lambda_tox
        self.lambda_synth = lambda_synth
        self.pareto_front = []  # list of (amp, tox, synth) score triples
        self.hv_history = []    # hypervolume after each optimization pass

    def calculate_rewards(self, samples):
        """Weighted scalar reward per sample, detached from the graph."""
        amp_scores, tox_scores, synth_scores = self.dis.batchClassify(samples)
        rewards = (self.lambda_amp * amp_scores) - \
                  (self.lambda_tox * tox_scores) - \
                  (self.lambda_synth * synth_scores)
        return rewards.detach()

    def nsga2_optimize(self, population_size=30):
        """Generate a candidate population and extract its Pareto front.

        A candidate is dominated when another is at least as good on all
        objectives (higher amp, lower tox, lower synth) and strictly better
        on at least one.
        """
        candidates, _ = self.gen.sample(population_size)
        amp_scores, tox_scores, synth_scores = self.dis.batchClassify(candidates)

        scores = [
            (amp_scores[k].item(), tox_scores[k].item(), synth_scores[k].item())
            for k in range(population_size)
        ]

        pareto_front = []
        for i in range(population_size):
            dominated = False
            for j in range(population_size):
                if i == j:
                    continue
                strictly_all = (amp_scores[j] > amp_scores[i]
                                and tox_scores[j] < tox_scores[i]
                                and synth_scores[j] < synth_scores[i])
                weakly_all = (amp_scores[j] >= amp_scores[i]
                              and tox_scores[j] <= tox_scores[i]
                              and synth_scores[j] <= synth_scores[i])
                strictly_any = (amp_scores[j] > amp_scores[i]
                                or tox_scores[j] < tox_scores[i]
                                or synth_scores[j] < synth_scores[i])
                # Same truth table as the original condition, whose operator
                # precedence parsed as: strictly_all or (weakly_all and strictly_any).
                if strictly_all or (weakly_all and strictly_any):
                    dominated = True
                    break
            if not dominated:
                pareto_front.append(scores[i])

        self.pareto_front = pareto_front
        hv = calculate_hypervolume(pareto_front)
        self.hv_history.append(hv)

        print(f"多目标优化完成 - 帕累托前沿大小: {len(pareto_front)}, 超体积: {hv:.4f}")
        return pareto_front


def train_generator_MLE(gen, gen_opt, real_data_samples, epochs):
    """Pretrain the generator with maximum likelihood (teacher forcing).

    Iterates `real_data_samples` in BATCH_SIZE chunks each epoch, minimizing
    gen.batchNLLLoss. Relies on the module-level BATCH_SIZE, START_LETTER
    and CUDA settings.
    """
    for epoch in range(epochs):
        print(f'epoch {epoch + 1}/{epochs} : ', end='', flush=True)
        total_loss = 0
        batch_count = 0

        for i in range(0, len(real_data_samples), BATCH_SIZE):
            # Slice out the next (possibly short, final) batch.
            batch_end = min(i + BATCH_SIZE, len(real_data_samples))
            batch_samples = real_data_samples[i:batch_end]
            inp, target = prepare_generator_batch(batch_samples, start_letter=START_LETTER, gpu=CUDA)

            # Standard optimizer step on the NLL loss.
            gen_opt.zero_grad()
            loss = gen.batchNLLLoss(inp, target)
            loss.backward()
            gen_opt.step()

            total_loss += loss.item()
            batch_count += 1

            print('.', end='', flush=True)

        # Guard against an empty dataset: batch_count == 0 previously raised
        # ZeroDivisionError here.
        avg_loss = total_loss / batch_count if batch_count else float('nan')
        print(f' avg_NLL = {avg_loss:.4f}')


def train_generator_PG(gen, gen_opt, pareto_optimizer, num_batches):
    """Run `num_batches` policy-gradient updates on the generator.

    Each batch: roll out BATCH_SIZE sequences, score them via the pareto
    optimizer's multi-objective reward, and take one optimizer step.
    """
    for batch in range(num_batches):
        # Roll out a batch of sequences from the current policy.
        sampled, _ = gen.sample(BATCH_SIZE)
        inp, target = prepare_generator_batch(sampled, start_letter=START_LETTER, gpu=CUDA)

        # Discriminator-derived multi-objective reward per sequence.
        rewards = pareto_optimizer.calculate_rewards(sampled)

        gen_opt.zero_grad()
        pg_loss = gen.batchPGLoss(inp, target, rewards)
        pg_loss.backward()
        gen_opt.step()

        print(f'PG batch {batch + 1}/{num_batches} - loss: {pg_loss.item():.4f}')


def train_discriminator(discriminator, dis_opt, real_data_samples, generator, d_steps, epochs):
    """Train the discriminator to separate real (1) from generated (0) data.

    NOTE(review): only the AMP head receives gradient in this loop; the
    toxicity and synthesis heads are never directly supervised here.
    """
    bce = nn.BCELoss()
    for d_step in range(d_steps):
        print(f'd-step {d_step + 1}/{d_steps}:')

        for epoch in range(epochs):
            # Draw fresh negatives from the current generator each epoch.
            neg_samples, _ = generator.sample(len(real_data_samples))
            dis_inp, dis_target = prepare_discriminator_data(real_data_samples, neg_samples, gpu=CUDA)

            dis_opt.zero_grad()
            hidden = discriminator.init_hidden(dis_inp.size(0))
            amp_out, _, _ = discriminator(dis_inp, hidden)
            loss = bce(amp_out, dis_target.float().unsqueeze(1))
            loss.backward()
            dis_opt.step()

            # Accuracy of the AMP head at a 0.5 threshold.
            amp_pred = (amp_out > 0.5).float()
            acc = (amp_pred == dis_target.unsqueeze(1)).float().mean().item()

            print(f'  epoch {epoch + 1}/{epochs} - loss: {loss.item():.4f}, acc: {acc:.4f}')


# Environment / vocabulary parameters
CUDA = torch.cuda.is_available()
VOCAB_SIZE = 26  # tokens 1-24 (CHARACTER_DICT) + start token 0 + padding 25
MAX_SEQ_LEN = 18
START_LETTER = 0  # start-of-sequence token fed to the generator

# Model dimensions
GEN_EMBEDDING_DIM = 3
GEN_HIDDEN_DIM = 128
DIS_EMBEDDING_DIM = 3
DIS_HIDDEN_DIM = 128
BATCH_SIZE = 64

# Training schedule
ADV_TRAIN_EPOCHS = 20  # adversarial training rounds (reduced)
MLE_TRAIN_EPOCHS = 15  # MLE pretraining epochs (reduced)
NUM_PG_BATCHES = 1     # policy-gradient batches per adversarial epoch
D_STEPS = 5            # discriminator pretraining outer steps
D_EPOCHS = 3           # epochs per discriminator step

if __name__ == '__main__':
    # Seed for reproducibility.
    torch.manual_seed(203)
    if CUDA:
        torch.cuda.manual_seed(203)
        device = torch.device("cuda")
        print("使用GPU加速")
    else:
        device = torch.device("cpu")
        print("使用CPU")

    # Move the encoded real sequences onto the training device.
    real_data_samples = torch.tensor(all_data, dtype=torch.long)
    if CUDA:
        real_data_samples = real_data_samples.cuda()

    # Build the models and the multi-objective reward helper.
    gen = Generator(GEN_EMBEDDING_DIM, GEN_HIDDEN_DIM, VOCAB_SIZE, MAX_SEQ_LEN, gpu=CUDA).to(device)
    dis = MultiObjectiveDiscriminator(DIS_EMBEDDING_DIM, DIS_HIDDEN_DIM, VOCAB_SIZE, MAX_SEQ_LEN, gpu=CUDA).to(device)
    pareto_optimizer = ParetoOptimizer(gen, dis)

    # Optionally resume from pretrained checkpoints. The original bare
    # `except:` also swallowed KeyboardInterrupt/SystemExit; narrowed to
    # Exception so manual interrupts still propagate.
    try:
        if os.path.exists('gen_500.pth'):
            gen.load_state_dict(torch.load('gen_500.pth', map_location=device))
            print("加载生成器预训练模型")
        if os.path.exists('dis_500.pth'):
            dis.load_state_dict(torch.load('dis_500.pth', map_location=device))
            print("加载判别器预训练模型")
    except Exception:
        print("预训练模型加载失败，从头开始训练")

    # Optimizers
    gen_opt = optim.Adam(gen.parameters(), lr=0.0001)
    dis_opt = optim.Adam(dis.parameters(), lr=0.0001)

    # Stage 1: MLE pretraining of the generator.
    print('Starting Generator MLE Training...')
    train_generator_MLE(gen, gen_opt, real_data_samples, MLE_TRAIN_EPOCHS)
    print('Finished Generator MLE Training...')

    # Stage 2: discriminator pretraining.
    print('\nStarting Discriminator Training...')
    train_discriminator(dis, dis_opt, real_data_samples, gen, D_STEPS, D_EPOCHS)

    # Stage 3: adversarial training with periodic multi-objective optimization.
    print('\nStarting Adversarial Training with Multi-Objective Optimization...')
    for epoch in range(ADV_TRAIN_EPOCHS):
        print(f'\n-------- EPOCH {epoch + 1}/{ADV_TRAIN_EPOCHS} --------')

        # Refresh the Pareto front every 3 epochs.
        if epoch % 3 == 0:
            pareto_front = pareto_optimizer.nsga2_optimize()
            if len(pareto_front) > 0:
                visualize_pareto(pareto_front)

        # Generator update(s).
        print('\nTraining Generator:')
        train_generator_PG(gen, gen_opt, pareto_optimizer, NUM_PG_BATCHES)

        # Discriminator update: a single step/epoch per adversarial epoch.
        print('\nTraining Discriminator:')
        train_discriminator(dis, dis_opt, real_data_samples, gen, 1, 1)

        # Checkpoint every 5 epochs and at the end.
        if (epoch + 1) % 5 == 0 or epoch == ADV_TRAIN_EPOCHS - 1:
            torch.save(gen.state_dict(), f'gen_epoch{epoch + 1}.pth')
            torch.save(dis.state_dict(), f'dis_epoch{epoch + 1}.pth')
            print(f"模型已保存: gen_epoch{epoch + 1}.pth, dis_epoch{epoch + 1}.pth")

    # Final checkpoints.
    torch.save(gen.state_dict(), 'gen_final.pth')
    torch.save(dis.state_dict(), 'dis_final.pth')
    print("\n训练完成! 最终模型已保存: gen_final.pth, dis_final.pth")

    # Export one sampled sequence per Pareto-front score triple.
    # NOTE(review): each sequence here is freshly sampled and is NOT the
    # candidate that produced the paired scores — confirm whether the
    # original candidates should be retained instead.
    pareto_sequences = []
    for amp_score, tox_score, synth_score in pareto_optimizer.pareto_front:
        sample, _ = gen.sample(1)
        seq = vector_to_sequence(sample[0].cpu().numpy())
        pareto_sequences.append((seq, amp_score, tox_score, synth_score))

    # Persist the results as CSV-style text.
    with open('pareto_results.txt', 'w') as f:
        f.write("Sequence,AMP_Score,Toxicity_Score,Synthesis_Difficulty\n")
        for seq, amp, tox, synth in pareto_sequences:
            f.write(f"{seq},{amp:.4f},{tox:.4f},{synth:.4f}\n")

    print("帕累托前沿序列已保存到 pareto_results.txt")