import os
import json
import csv
import ast
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from transformers import AutoTokenizer
from transformer_model import MiniTransformerClassifier  # model class defined in the companion transformer_model.py

# ---- Configuration ----
model_dir = "/home/xuyd/.cache/huggingface/hub/models--deepseek-ai--deepseek-moe-16b-chat/snapshots/eefd8ac7e8dc90e095129fe1a537d5e236b2e57c"
sentences_file = '/home/xuyd/llm_infer/deepseek/expert_predicter/economics/generated_sentences_new.json'
layers_dir = '/home/xuyd/llm_infer/deepseek/expert_predicter/economics/layers_output/'
output_dir = '/home/xuyd/llm_infer/deepseek/test/project0/transformer_model/'
metrics_file = os.path.join(output_dir, 'training_metrics.csv')

num_classes = 64   # number of experts per layer → one binary label each
max_length = 512   # every sentence is padded/truncated to this many tokens
batch_size = 32
test_size = 0.2    # fraction of samples held out for evaluation
random_state = 0   # fixed seed so the train/test split is reproducible
epochs = 30

os.makedirs(output_dir, exist_ok=True)

tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)

# Flatten every item's step-by-step reasoning chain into one flat sentence
# list, escaping literal newlines so each sentence stays a single line.
with open(sentences_file, "r") as f:
    corpus = json.load(f)
sentences = []
for entry in corpus:
    for step in entry["step_by_step"]:
        sentences.append(step.replace("\n", "\\n"))

# Tokenize the whole corpus up front; padding='max_length' right-pads every
# sample to exactly max_length tokens so the tensors are rectangular.
encoded = tokenizer(sentences, padding='max_length', truncation=True,
                    max_length=max_length, return_tensors='pt')
# Clamp guards against token ids past the end of the embedding table, and
# int64 is what nn.Embedding expects (also needed after a numpy round-trip).
input_ids = encoded['input_ids'].clamp(max=tokenizer.vocab_size - 1).long()

def train_and_evaluate(layer_num, input_ids, multilabels, top_k=6):
    """Train a MiniTransformerClassifier to predict one MoE layer's active experts.

    Splits (input_ids, multilabels) into train/test sets, trains for `epochs`
    epochs with BCE-with-logits loss, evaluates after each epoch by taking the
    `top_k` highest-probability classes as the predicted label set, and
    checkpoints the weights whenever the sample-averaged F1 improves.

    Args:
        layer_num: index of the MoE layer (used in checkpoint names / metric rows).
        input_ids: int array, shape (num_samples, max_length), of token ids.
        multilabels: float array, shape (num_samples, num_classes); 1.0 marks an
            expert activated for that sample.
        top_k: number of classes predicted per sample. Defaults to 6, matching
            the previously hard-coded behaviour; also used as the accuracy
            denominator so the two stay consistent.

    Returns:
        List of per-epoch dicts with keys layer/epoch/loss/accuracy/f1_score.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        input_ids, multilabels, test_size=test_size, random_state=random_state)

    train_dataset = TensorDataset(torch.tensor(X_train, dtype=torch.long),
                                  torch.tensor(y_train, dtype=torch.float32))
    test_dataset = TensorDataset(torch.tensor(X_test, dtype=torch.long),
                                 torch.tensor(y_test, dtype=torch.float32))

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = MiniTransformerClassifier(vocab_size=tokenizer.vocab_size, num_classes=num_classes).to(device)

    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    best_f1 = 0.0
    metrics = []

    for epoch in range(epochs):
        # --- training pass ---
        model.train()
        total_loss = 0.0
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            logits = model(inputs)
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        avg_loss = total_loss / len(train_loader)

        # --- evaluation pass ---
        model.eval()
        all_preds, all_labels = [], []
        with torch.no_grad():
            for inputs, labels in test_loader:
                inputs = inputs.to(device)
                logits = model(inputs)
                # Predict exactly top_k classes per sample: one-hot encode the
                # indices of the top_k largest sigmoid probabilities.
                topk_idx = torch.topk(torch.sigmoid(logits), k=top_k, dim=1)[1]
                pred_labels = torch.zeros_like(logits)
                pred_labels.scatter_(1, topk_idx, 1)
                all_preds.extend(pred_labels.cpu().numpy())
                all_labels.extend(labels.numpy())

        # Convert once; both metrics below reuse the same arrays.
        y_true = np.array(all_labels)
        y_pred = np.array(all_preds)
        f1 = f1_score(y_true, y_pred, average='samples')
        # "Accuracy" here is the top_k hit rate: correctly predicted active
        # experts divided by the total number of predictions made.
        acc = (y_true * y_pred).sum() / (top_k * len(y_true))

        metrics.append({
            'layer': layer_num, 'epoch': epoch + 1,
            'loss': avg_loss, 'accuracy': acc, 'f1_score': f1
        })

        # Checkpoint the best model (by sample-averaged F1) for this layer.
        if f1 > best_f1:
            best_f1 = f1
            torch.save(model.state_dict(), f'{output_dir}transformer_layer{layer_num}.pth')

    return metrics

# Write the CSV header once up front; per-layer results are appended after
# each layer finishes, so a crash mid-run keeps everything written so far.
metric_columns = ['layer', 'epoch', 'loss', 'accuracy', 'f1_score']
with open(metrics_file, 'w', newline='') as f:
    csv.DictWriter(f, fieldnames=metric_columns).writeheader()

# Train one classifier per MoE layer (layers 1..27).
for layer_num in tqdm(range(1, 28), desc="处理各层"):
    layer_file = f'{layers_dir}layer_{layer_num}.csv'
    # Each non-blank line of the layer file is a Python-literal list of the
    # expert indices activated for the corresponding sentence.
    with open(layer_file, 'r') as file:
        expert_lists = [ast.literal_eval(line.strip()) for line in file if line.strip()]

    # One-hot encode the active experts into a (num_samples, num_classes) matrix.
    multilabel = np.zeros((len(expert_lists), num_classes), dtype=np.float32)
    for row, experts in enumerate(expert_lists):
        for expert in experts:
            # Skip anything that is not a valid integer expert index.
            if isinstance(expert, (int, np.integer)) and 0 <= expert < num_classes:
                multilabel[row, int(expert)] = 1

    metrics = train_and_evaluate(layer_num, input_ids.numpy(), multilabel)
    with open(metrics_file, 'a', newline='') as f:
        csv.DictWriter(f, fieldnames=metric_columns).writerows(metrics)

print("✅ 所有层训练完成，模型已保存。")
