import logging

import pandas as pd
import torch
from datasets import Dataset
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# torch's Dataset intentionally shadows datasets.Dataset above: SentenceDataset
# is a map-style torch dataset consumed by DataLoader, not an Arrow dataset.
from torch.utils.data import DataLoader, Dataset, TensorDataset
from transformers import (
    AdamW,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    get_linear_schedule_with_warmup,
)

# Load the raw training data.
# NOTE(review): expected to contain 'question', 'syn_question' and 'label'
# columns (consumed by SentenceDataset below) — confirm against the CSV.
data = pd.read_csv('data/train.csv')

# Hold out 20% of the rows for validation; fixed seed for reproducibility.
train_data, val_data = train_test_split(data, test_size=0.2, random_state=42)


# 定义数据集类
class SentenceDataset(Dataset):
    """Map-style dataset of (question, syn_question) text pairs for
    sequence-pair classification.

    Expects a pandas DataFrame with columns ``question``, ``syn_question``
    and ``label`` (labels must be convertible to ``int``).

    Scalar indexing returns unbatched tensors — ``input_ids`` and
    ``attention_mask`` of shape ``(max_length,)`` and a scalar ``labels``
    tensor — so the default DataLoader collation stacks them into
    ``(batch, max_length)`` / ``(batch,)``. List indexing returns an
    already-batched encoding with a leading batch dimension.
    """

    def __init__(self, dataset, tokenizer, max_length):
        # dataset: pandas DataFrame holding the raw text and labels.
        self.dataset = dataset
        # tokenizer: callable following the HF tokenizer __call__ contract.
        self.tokenizer = tokenizer
        # max_length: every pair encoding is padded/truncated to this length.
        self.max_length = max_length

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        single = not isinstance(idx, list)
        if single:
            # Slice (rather than scalar .iloc) so we always get a DataFrame.
            batch = self.dataset.iloc[idx:idx + 1].reset_index(drop=True)
        else:
            batch = self.dataset.iloc[idx].reset_index(drop=True)

        if not isinstance(batch, pd.DataFrame):
            raise TypeError(f"Batch is not a DataFrame:{batch}")

        questions = batch['question'].tolist()
        syn_questions = batch['syn_question'].tolist()
        labels = batch['label'].tolist()
        try:
            labels = [int(label) for label in labels]
        except ValueError:
            print(f"Error: 文件中的标签无法转换为整数，请检查标签数据。{labels}\n{idx}")
            raise

        # Joint encoding of the pair; fixed-length padding keeps every item
        # the same shape so default collation can stack them.
        encodings = self.tokenizer(
            questions,
            syn_questions,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt'
        )

        input_ids = encodings['input_ids']
        attention_mask = encodings['attention_mask']
        label_tensor = torch.tensor(labels, dtype=torch.long)

        if single:
            # BUG FIX: drop the leading batch dim of 1. Without this, the
            # default DataLoader collation produced (batch, 1, max_length)
            # inputs and (batch, 1) labels, which the model forward and the
            # cross-entropy loss reject.
            return {
                'input_ids': input_ids.squeeze(0),
                'attention_mask': attention_mask.squeeze(0),
                'labels': label_tensor.squeeze(0),
            }

        return {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'labels': label_tensor,
        }


# Initialize tokenizer and datasets.
tokenizer = AutoTokenizer.from_pretrained("pretrained_model")  # local model directory
max_length = 128  # pad/truncate every encoded pair to 128 tokens
train_dataset = SentenceDataset(train_data, tokenizer, max_length)
val_dataset = SentenceDataset(val_data, tokenizer, max_length)

# Sanity-check the split sizes.
print("Train dataset size:", len(train_dataset))
print("Validation dataset size:", len(val_dataset))

# Create DataLoaders: shuffle only the training split.
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=128, shuffle=False)

####################
# Load the pretrained encoder with a 2-way sequence-classification head.
model = AutoModelForSequenceClassification.from_pretrained("pretrained_model", num_labels=2)

# Training configuration.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(torch.cuda.is_available())
model.to(device)
epochs = 1
learning_rate = 2e-5
# NOTE(review): transformers.AdamW is deprecated (removed in recent
# transformers releases); torch.optim.AdamW is the usual replacement —
# confirm against the pinned library version.
optimizer = AdamW(model.parameters(), lr=learning_rate)
total_steps = len(train_loader) * epochs
# Linear decay to zero over all training steps, no warmup.
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)

# Early-stopping parameters.
# NOTE(review): with epochs == 1, patience = 5 can never trigger a stop.
patience = 5  # stop after this many consecutive epochs without improvement
min_delta = 0.01  # minimum accuracy gain that counts as an improvement
best_accuracy = 0.0  # best validation accuracy seen so far
no_improvement_count = 0  # consecutive epochs without improvement

# Configure the root logger.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Training loop: one pass over train_loader per epoch, followed by a full
# validation pass and an early-stopping check on validation accuracy.
for epoch in range(epochs):
    logging.info(f"开始第 {epoch + 1} 轮训练：")
    model.train()
    total_train_loss = 0
    batch_count = 0
    for step, batch in enumerate(train_loader):
        batch_count = batch_count + 1

        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)

        optimizer.zero_grad()

        try:
            # Passing labels makes the model compute the classification loss itself.
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss
            total_train_loss += loss.item()
            logging.info(f"第 {step + 1} 个批次 | 损失: {loss.item():.4f}")
            loss.backward()
            # Clip gradient norm to 1.0 to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
        except Exception as e:
            # NOTE(review): broad catch-and-continue hides real failures
            # (OOM, shape errors) and skews the average below, since skipped
            # batches still count in len(train_loader). Consider narrowing.
            logging.error(f"训练过程中出现异常：{e}")
            continue

    avg_train_loss = total_train_loss / len(train_loader)
    logging.info(f"第 {epoch + 1} 轮训练结束, 平均损失: {avg_train_loss:.4f}")

    # Evaluation: argmax over logits, accuracy on the validation split.
    model.eval()
    all_preds = []
    all_labels = []
    with torch.no_grad():
        print(f"开始第 {epoch + 1}/{epochs} 轮评估验证：")
        for batch in val_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            outputs = model(input_ids, attention_mask=attention_mask)
            _, preds = torch.max(outputs.logits, dim=1)

            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    accuracy = accuracy_score(all_labels, all_preds)
    logging.info(f"第 {epoch + 1}/{epochs} 轮 | 训练损失: {avg_train_loss:.4f} | 验证准确率: {accuracy:.4f}")

    # Early stopping: checkpoint on a sufficiently large improvement,
    # otherwise count a stale epoch and stop after `patience` of them.
    if accuracy > best_accuracy + min_delta:
        best_accuracy = accuracy
        no_improvement_count = 0
        torch.save(model.state_dict(), f'best_model_epoch_{epoch + 1}.pth')  # checkpoint the best model so far
    else:
        no_improvement_count += 1
        if no_improvement_count >= patience:
            logging.info(f"连续 {patience} 轮未改进，提前结束训练。")
            break


# 保存模型
def save_model(model, output_dir):
    # 确保所有张量都是连续的
    for param in model.parameters():
        param.data = param.data.contiguous()
    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
    model_to_save.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
    logging.info(f"模型已保存到 {output_dir}")


# Output directory for the final model artifacts.
output_dir = 'bert_model'

# Persist the model (and tokenizer) once training finishes.
save_model(model, output_dir)
