import pandas as pd
import numpy as np
import mindspore as ms
from mindspore import nn, ops, Tensor
from mindspore.dataset import GeneratorDataset
from mindspore.train import Model
from mindspore.train.callback import LossMonitor, TimeMonitor
from mindspore.nn.loss import BCELoss
from mindspore.nn.optim import AdamWeightDecay
from mindnlp.transformers import BertTokenizer, BertConfig, BertModel
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
import mindspore.train.serialization as serialization

# Configure MindSpore execution: CPU device, eager (PYNATIVE) mode.
ms.set_context(device_target="CPU", mode=ms.PYNATIVE_MODE)

# 2. Data preparation
try:
    df_train = pd.read_csv('train.csv.zip')
    print("训练数据加载成功，数据预览:")
    print(df_train.head())
except FileNotFoundError:
    # Bug fix: the message used to say 'train.csv' while the code actually
    # reads 'train.csv.zip' — keep the message consistent with the real path.
    print("错误: train.csv.zip 文件未找到，请确保文件在当前工作目录下。")
    # Exit with a non-zero status so shells/CI can detect the failure
    # (the previous bare exit() returned status 0).
    raise SystemExit(1)

# Data preprocessing: tokenize (Prompt, Answer) pairs and collect labels.
def preprocess_data(df, tokenizer, max_length=128):
    """Tokenize every (Prompt, Answer) pair in *df* for BERT classification.

    Each prompt and answer are joined with a literal " [SEP] " marker and
    encoded via ``tokenizer.encode_plus`` with padding/truncation to
    ``max_length`` and numpy tensors requested.

    Returns four parallel lists: input ids, attention masks, token type
    ids, and the raw ``Target`` labels.
    """
    encoded_inputs = []
    encoded_masks = []
    encoded_segments = []
    collected_labels = []

    rows = zip(df['Prompt'].tolist(), df['Answer'].tolist(), df['Target'].tolist())
    for prompt_text, answer_text, target_value in rows:
        pair_text = str(prompt_text) + " [SEP] " + str(answer_text)

        encoding = tokenizer.encode_plus(
            pair_text,
            add_special_tokens=True,
            max_length=max_length,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_token_type_ids=True,
            return_tensors='np',
        )

        encoded_inputs.append(encoding['input_ids'])
        encoded_masks.append(encoding['attention_mask'])
        encoded_segments.append(encoding['token_type_ids'])
        collected_labels.append(target_value)

    return encoded_inputs, encoded_masks, encoded_segments, collected_labels

# Load the BERT tokenizer (Chinese pre-trained vocabulary).
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

# Split into training and validation sets (80/20, fixed seed for reproducibility).
train_df, val_df = train_test_split(df_train, test_size=0.2, random_state=42)

# Tokenize both splits; each call returns four parallel lists
# (input ids, attention masks, token type ids, labels).
train_input_ids, train_attention_masks, train_token_type_ids, train_labels = preprocess_data(train_df, tokenizer)
val_input_ids, val_attention_masks, val_token_type_ids, val_labels = preprocess_data(val_df, tokenizer)

# Random-access dataset source consumed by GeneratorDataset below.
class HallucinationDataset:
    """Random-access sample source for MindSpore's GeneratorDataset.

    Each item is a 4-tuple of MindSpore tensors:
    (input_ids int64, attention_mask float32, token_type_ids int32,
    label float32 with shape (1,)).
    """

    def __init__(self, input_ids, attention_masks, token_type_ids, labels):
        self.input_ids = input_ids
        self.attention_masks = attention_masks
        self.token_type_ids = token_type_ids
        self.labels = labels

    def __len__(self):
        # One sample per encoded input.
        return len(self.input_ids)

    def __getitem__(self, idx):
        # encode_plus(return_tensors='np') yields (1, max_length) arrays;
        # drop the leading batch axis so every sample is 1-D.
        ids_row = np.squeeze(self.input_ids[idx], axis=0)
        mask_row = np.squeeze(self.attention_masks[idx], axis=0)
        segment_row = np.squeeze(self.token_type_ids[idx], axis=0)

        return (
            Tensor(ids_row, ms.int64),
            Tensor(mask_row, ms.float32),  # float mask (kept from original)
            Tensor(segment_row, ms.int32),
            Tensor(np.array([self.labels[idx]]), ms.float32),
        )

# Wrap the tokenized splits in random-access dataset objects.
train_dataset = HallucinationDataset(train_input_ids, train_attention_masks, train_token_type_ids, train_labels)
val_dataset = HallucinationDataset(val_input_ids, val_attention_masks, val_token_type_ids, val_labels)

# Build batched MindSpore pipelines; only the training stream is shuffled.
batch_size = 32
column_names = ["input_ids", "attention_mask", "token_type_ids", "label"]
train_data = GeneratorDataset(train_dataset, column_names=column_names, shuffle=True).batch(batch_size)
val_data = GeneratorDataset(val_dataset, column_names=column_names, shuffle=False).batch(batch_size)

# 3. Define the BERT classification model
class BertForSequenceClassification(nn.Cell):
    """BERT encoder with a single-logit sigmoid head for binary classification.

    construct() returns per-sample probabilities in [0, 1], suitable for
    feeding directly into BCELoss.
    """

    def __init__(self, bert_config, num_labels):
        super().__init__()
        # Attribute names are kept as-is: checkpoint parameter keys are
        # derived from them (bert.*, classifier.*).
        self.bert = BertModel(bert_config)
        self.dropout = nn.Dropout(p=0.1)
        self.classifier = nn.Dense(bert_config.hidden_size, num_labels)
        self.sigmoid = ops.Sigmoid()

    def construct(self, input_ids, attention_mask, token_type_ids):
        # Run the encoder; element [1] is taken as the pooled [CLS]
        # representation (assumes the mindnlp BertModel returns a tuple —
        # TODO(review): confirm against the mindnlp BertModel return type).
        encoder_outputs = self.bert(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
        )
        pooled_output = encoder_outputs[1]

        # Defensive type check kept from the original implementation.
        if not isinstance(pooled_output, Tensor):
            raise TypeError(f"pooled_output 必须是 Tensor，但实际类型为 {type(pooled_output)}")

        # Dropout -> linear head -> sigmoid probability.
        return self.sigmoid(self.classifier(self.dropout(pooled_output)))

# Load the pre-trained BERT configuration and build the classifier.
# NOTE: a hand-built BertConfig(...) literal used to live here, but it was
# immediately overwritten by from_pretrained() and never used — removed as
# dead code.
config = BertConfig.from_pretrained('bert-base-chinese')
model = BertForSequenceClassification(config, num_labels=1)

# 4. Loss and optimizer
# BCELoss expects probabilities (the model ends in a sigmoid);
# AdamWeightDecay applies decoupled weight decay to all trainable params.
loss_fn = BCELoss()
optimizer = AdamWeightDecay(params=model.trainable_params(), learning_rate=2e-5, weight_decay=0.01)

# 5. Train the model
def forward_fn(input_ids, attention_mask, token_type_ids, labels):
    """Forward pass for one batch: return (loss, probabilities)."""
    probs = model(input_ids, attention_mask, token_type_ids)
    loss = loss_fn(probs, labels)
    return loss, probs

# Differentiate forward_fn w.r.t. the optimizer's weights only
# (grad_position=None: no gradients w.r.t. the positional inputs).
# NOTE(review): forward_fn returns a tuple and has_aux is not set — confirm
# against the MindSpore value_and_grad docs whether has_aux=True is intended
# so that only the loss (not probs) contributes to the gradients.
grad_fn = ops.value_and_grad(forward_fn, grad_position=None, weights=optimizer.parameters)

def train_step(input_ids, attention_mask, token_type_ids, labels):
    """Run one optimization step and return (loss, probabilities)."""
    (loss, probs), grads = grad_fn(input_ids, attention_mask, token_type_ids, labels)
    # ops.depend forces the optimizer update to execute before `loss` is
    # returned, keeping the parameter-update side effect ordered.
    loss = ops.depend(loss, optimizer(grads))
    return loss, probs

# Manual training loop (PYNATIVE mode). The LossMonitor/TimeMonitor
# instances previously created here were dead code: they are callbacks for
# the high-level Model.train API, which this manual loop never uses.
epochs = 3
model.set_train()

print("开始训练...")
for epoch in range(epochs):
    for batch in train_data.create_tuple_iterator():
        input_ids, attention_mask, token_type_ids, labels = batch
        loss, probs = train_step(input_ids, attention_mask, token_type_ids, labels)
    # Reports the loss of the *last* batch of the epoch (not an epoch average).
    print(f"Epoch {epoch + 1}/{epochs}, Loss: {loss.asnumpy():.4f}")

print("训练完成!")

# Persist the trained parameters for later evaluation / inference.
checkpoint_path = "model_checkpoint.ckpt"  # saved checkpoint path/filename
serialization.save_checkpoint(model, checkpoint_path)
print(f"模型参数已保存到 {checkpoint_path}")

# 6. Model evaluation
print("\n开始模型评估...")

# To evaluate a previously saved checkpoint instead, uncomment:
# param_dict = ms.load_checkpoint(checkpoint_path)
# ms.load_param_into_net(model, param_dict)

model.set_train(False)  # inference mode (disables dropout)

# Collect flat per-sample probabilities and ground-truth labels.
# (A dead `all_preds = []` initialization was removed: the list is built
# from scratch below.)
all_probs = []
all_labels = []

for batch in val_data.create_tuple_iterator():
    input_ids, attention_mask, token_type_ids, labels = batch
    probs = model(input_ids, attention_mask, token_type_ids)

    # Flatten (batch, 1) outputs into scalar entries.
    all_probs.extend(probs.asnumpy().flatten())
    all_labels.extend(labels.asnumpy().flatten())

# Binarize probabilities with a 0.5 decision threshold.
all_preds = [1 if prob > 0.5 else 0 for prob in all_probs]

# Precision/recall/F1 per class, plus ranking quality (AUC on raw probs).
print("\n验证集评估结果:")
print(classification_report(all_labels, all_preds))
print(f"AUC分数: {roc_auc_score(all_labels, all_probs):.4f}")

# Plain accuracy over the validation set.
accuracy = sum(1 for pred, label in zip(all_preds, all_labels) if pred == label) / len(all_labels)
print(f"\n正确率: {accuracy:.4f}")