# train.py
import numpy as np
import torch
from transformers import BertTokenizer, BertForSequenceClassification, Trainer, TrainingArguments
from sklearn.model_selection import train_test_split
import pandas as pd

# Print the active NumPy version (handy when debugging binary-compat issues
# between NumPy and the installed torch/transformers builds).
print("NumPy 版本:", np.__version__)

# Pick the compute device: prefer a CUDA GPU, fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
print(f"Device: {device}")

# Download the pretrained Chinese BERT checkpoint and its tokenizer, attach a
# fresh 3-class classification head, and move the weights onto `device`.
model_name = "bert-base-chinese"
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertForSequenceClassification.from_pretrained(
    model_name, num_labels=3
).to(device)

def load_tsv_dataset(file_path):
    """Read a tab-separated dataset file.

    The file must contain a header row with at least a 'text_a' column
    (the input sentences) and a 'label' column (integer class ids).

    Returns:
        A (texts, labels) pair of plain Python lists.
    """
    frame = pd.read_csv(file_path, sep='\t')
    return list(frame['text_a']), list(frame['label'])

# Load the training corpus; `train.tsv` is expected next to this script.
file_path = 'train.tsv'
texts, labels = load_tsv_dataset(file_path)

# Hold out 20% of the rows as a validation split; the fixed seed keeps the
# split reproducible across runs.
train_texts, val_texts, train_labels, val_labels = train_test_split(
    texts, labels, test_size=0.2, random_state=42
)

def _encode(batch):
    # Tokenize a list of strings into padded/truncated PyTorch tensors in
    # the input format BERT expects.
    return tokenizer(batch, truncation=True, padding=True, return_tensors='pt')

train_encodings = _encode(train_texts)
val_encodings = _encode(val_texts)

class MyDataset(torch.utils.data.Dataset):
    """Pairs tokenizer encodings with labels for use with the HF Trainer.

    `encodings` is the mapping returned by the tokenizer (input_ids,
    attention_mask, ...); `labels` is an indexable sequence of targets of
    the same length.
    """

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        # One example per label.
        return len(self.labels)

    def __getitem__(self, idx):
        # Slice every encoding field at `idx` and attach the target under
        # the 'labels' key, which is the name HF models look for.
        sample = {}
        for field, tensor in self.encodings.items():
            sample[field] = tensor[idx]
        sample['labels'] = self.labels[idx]
        return sample

# Wrap the encoded splits in Dataset objects the Trainer can iterate.
train_dataset = MyDataset(train_encodings, torch.tensor(train_labels))
val_dataset = MyDataset(val_encodings, torch.tensor(val_labels))

# Hyperparameters and bookkeeping for the fine-tuning run.
training_args = TrainingArguments(
    output_dir='./results',          # where checkpoints and logs are written
    overwrite_output_dir=True,       # reuse the directory across runs
    num_train_epochs=3,              # number of passes over the training set
    per_device_train_batch_size=8,   # training batch size per device
    per_device_eval_batch_size=8,    # evaluation batch size per device
    eval_strategy="epoch",           # evaluate at the end of every epoch
    logging_dir='./logs',            # TensorBoard log directory
    logging_steps=100,               # log metrics every 100 optimizer steps
)

# Wire the model, hyperparameters, and data together.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
)

# Run the fine-tuning loop (evaluates on val_dataset after every epoch,
# per eval_strategy="epoch" above).
trainer.train()

# Persist the fine-tuned weights and tokenizer to a fixed directory so the
# inference section below can reload them from disk.
model_path = './results/checkpoint-final'
model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)

# Reload from disk rather than reusing the in-memory objects — this exercises
# the exact load path a downstream consumer would use. The model must be moved
# back onto `device` because from_pretrained loads onto the CPU.
model = BertForSequenceClassification.from_pretrained(model_path).to(device)
tokenizer = BertTokenizer.from_pretrained(model_path)

def predict_sentiment(text):
    """Classify a single string with the fine-tuned model.

    Args:
        text: The input sentence to score.

    Returns:
        A numpy array of shape (1, num_labels) containing softmax
        probabilities over the sentiment classes.
    """
    # Truncate overly long inputs so inference cannot exceed BERT's maximum
    # sequence length (the original call had no truncation and would fail on
    # inputs longer than the model's position-embedding limit).
    inputs = tokenizer(text, return_tensors="pt", truncation=True).to(device)
    # Inference only: disable autograd so no gradient graph is built
    # (the original forward pass tracked gradients needlessly, wasting
    # memory and time; .detach() is then no longer required).
    with torch.no_grad():
        outputs = model(**inputs)
    probabilities = torch.softmax(outputs.logits, dim=1)
    return probabilities.cpu().numpy()

# Smoke-test the fine-tuned model on one positive-sentiment example.
example_text = "这是一个很好的产品"
print(f"文本 '{example_text}' 的情感预测结果: {predict_sentiment(example_text)}")
