import os
import numpy as np
import matplotlib.pyplot as plt
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
import torch
import torch.optim as optim
from transformers import AutoProcessor
from torch.utils.tensorboard import SummaryWriter

from models.wave_model import MainBackbone, ContrastiveLoss, freeze
from utils.util import getEEGWaveDataloader, load_waves, getEEGValidDataloader, similarity_function

def train(model, train_loader, optimizer, criterion, scheduler, device, epoch, loss_history):
    """Run one epoch of contrastive wave/EEG training.

    Args:
        model: MainBackbone returning an embedding pair ``(q_wave, q_eeg)``.
        train_loader: iterable yielding ``(wave, eeg_feature)`` batches.
        optimizer: optimizer over ``model``'s parameters.
        criterion: ContrastiveLoss comparing the two embeddings.
        scheduler: LR scheduler, stepped once at the end of the epoch.
        device: torch device the batch tensors are moved to.
        epoch: zero-based epoch index (print shows it one-based).
        loss_history: list mutated in place; the epoch's *summed* loss is appended.

    NOTE: relies on the module-level ``writer`` (SummaryWriter) for TensorBoard
    logging — the function is not self-contained.
    """
    model.train()
    train_loss = 0.0
    for wave, eeg_feature in train_loader:
        wave = wave.to(device)
        eeg_feature = eeg_feature.to(device)
        q_wave, q_eeg = model(wave, eeg_feature)
        loss = criterion(q_wave, q_eeg)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()

    scheduler.step()
    # Per-batch average; len(train_loader) is the number of batches.
    epoch_loss = train_loss / len(train_loader)
    writer.add_scalar('Loss/Train', epoch_loss, epoch)
    print(f'Epoch {epoch + 1}, Train Loss: {train_loss:.4f}, Epoch Loss: {epoch_loss:.4f}')
    loss_history.append(train_loss)


# --- Configuration ---
class_num = 30  # number of audio classes used by the validation dataloader below
batch_size = 128
gpu_id = 4  # CUDA device index to use when a GPU is available
epochs = 10
temperature = 0.5  # NOTE(review): defined but never used in this file — confirm ContrastiveLoss reads it elsewhere
is_train = True  # run the training loop below
is_load_model = False  # resume from the previously saved checkpoint
device = torch.device(f"cuda:{gpu_id}" if torch.cuda.is_available() else "cpu")
# Backbone mapping 57-dim EEG features and audio waves into a shared 256-dim space
model = MainBackbone(input_dim=57, output_dim=256).to(device)
# model = freeze(model)  # optional: freeze backbone weights (currently disabled)
processor = AutoProcessor.from_pretrained('facebook/seamless-m4t-v2-large')
if is_load_model:
    model.load_state_dict(torch.load(os.path.join('/root/data/video_decoding', "model.pth"), map_location=device))

# Train the model
writer = SummaryWriter('runs/class_30_zscore')  # TensorBoard log directory; read by train() via the module scope
loss_val_min = np.inf  # NOTE(review): initialized but never updated or read below — likely leftover
if is_train:
    # Load data
    train_loader = getEEGWaveDataloader(batch_size, processor)
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion = ContrastiveLoss(batch_size, device)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs)
    loss_history = []
    for epoch in range(epochs):
        train(model, train_loader, optimizer, criterion, scheduler, device, epoch, loss_history)
        # Checkpoint after the first few epochs; overwrites the same file each time
        if epoch > 3:
            torch.save(model.state_dict(), os.path.join('/root/data/video_decoding', "model.pth"))

    writer.close()

# Classification task: match each EEG embedding against every audio-wave embedding
waves = load_waves(processor)  # load all audio waveforms
waves = waves.to(device)
test_loader = getEEGValidDataloader(batch_size, class_num)
similarities = []  # NOTE(review): declared but never appended to below — likely leftover
correct_val = 0
total_val = 0
model.eval()
with torch.no_grad():
    # Embed all candidate waves once, outside the batch loop
    wave_features = model.wave(waves)
    for eeg, truth in test_loader:
        eeg = eeg.to(device)
        target = truth.to(device)
        eeg_feature = model.encoder(eeg)
        # presumably returns the index of the most-similar wave per EEG sample,
        # comparable to `target` — verify similarity_function's contract
        results = similarity_function(eeg_feature, wave_features).to(device)
        correct_val += (results == target).sum().float()
        total_val += len(target)
    epoch_acc = correct_val / total_val
    print(f'val acc: {epoch_acc:.4f}')