import paddle
import paddle.nn as nn
from paddlenlp.transformers import ErnieTokenizer, ErnieModel
from paddlenlp.data import Pad, Stack, Tuple
from paddle.io import Dataset, DataLoader
import numpy as np

# 1. Dataset definition
class TextDataset(Dataset):
    """Map-style dataset that tokenizes raw texts lazily on access.

    Each item is a tuple ``(input_ids, token_type_ids, label)`` produced by
    running the supplied tokenizer over the raw text.
    """

    def __init__(self, texts, labels, tokenizer, max_seq_len=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_seq_len = max_seq_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        # Tokenize the raw text with the transformer tokenizer,
        # truncating to at most ``max_seq_len`` tokens.
        encoded = self.tokenizer(self.texts[idx], max_seq_len=self.max_seq_len)
        # Return token ids, segment ids and the example's label.
        return encoded['input_ids'], encoded['token_type_ids'], self.labels[idx]

# 2. Model: ERNIE-based text classifier
class TextClassifier(nn.Layer):
    """Sequence classifier: pretrained encoder -> dropout -> linear head."""

    def __init__(self, pretrained_model, num_classes=2, dropout=None):
        super(TextClassifier, self).__init__()
        self.ptm = pretrained_model
        # Fall back to a 0.1 drop rate when none is supplied.
        drop_rate = 0.1 if dropout is None else dropout
        self.dropout = nn.Dropout(drop_rate)
        self.classifier = nn.Linear(self.ptm.config["hidden_size"], num_classes)

    def forward(self, input_ids, token_type_ids=None):
        # The pretrained model returns (sequence_output, pooled_output);
        # only the pooled representation feeds the classification head.
        _, pooled = self.ptm(input_ids=input_ids, token_type_ids=token_type_ids)
        return self.classifier(self.dropout(pooled))

# 3. Data-loading helper
def create_dataloader(dataset, tokenizer, batch_size=32, shuffle=False):
    """Build a DataLoader whose collate step pads ids and stacks labels.

    Args:
        dataset: map-style dataset yielding (input_ids, token_type_ids, label).
        tokenizer: provides the pad values used for batching.
        batch_size: number of examples per batch.
        shuffle: whether to reshuffle the data every epoch.

    Returns:
        A ``paddle.io.DataLoader`` emitting padded, batched tensors.
    """
    # Collate function: pad both id sequences to the batch maximum length
    # and stack the integer labels into a single array.
    batchify_fn = Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),       # input_ids
        Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # token_type_ids
        Stack(dtype="int64"),                              # labels
    )

    return DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        collate_fn=batchify_fn,
        shuffle=shuffle,
        return_list=True,
    )

# 4. Training loop
def train(model, train_dataloader, dev_dataloader, epochs=3, learning_rate=5e-5):
    """Fine-tune ``model`` and print per-epoch train loss and dev accuracy.

    Args:
        model: classifier whose ``forward(input_ids, token_type_ids)`` returns logits.
        train_dataloader: yields (input_ids, token_type_ids, labels) batches.
        dev_dataloader: evaluation batches in the same format.
        epochs: number of passes over the training data.
        learning_rate: AdamW learning rate.
    """
    # Optimizer and loss function.
    optimizer = paddle.optimizer.AdamW(
        learning_rate=learning_rate,
        parameters=model.parameters()
    )
    criterion = nn.CrossEntropyLoss()

    for epoch in range(epochs):
        # --- training pass ---
        model.train()
        total_loss = 0.0
        for batch in train_dataloader:
            input_ids, token_type_ids, labels = batch
            logits = model(input_ids, token_type_ids)
            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()
            # Cast to a Python float: accumulating loss.numpy() keeps a
            # numpy array, which then prints as e.g. "[0.6931]" instead of
            # a plain scalar in the epoch summary.
            total_loss += float(loss)

        # --- evaluation pass ---
        model.eval()
        correct = 0
        total = 0
        with paddle.no_grad():
            for batch in dev_dataloader:
                input_ids, token_type_ids, labels = batch
                logits = model(input_ids, token_type_ids)
                predictions = paddle.argmax(logits, axis=1)
                # int() turns the 0-d tensor sum into a Python integer.
                correct += int((predictions == labels).sum())
                total += labels.shape[0]

        # Guard against empty loaders to avoid ZeroDivisionError.
        avg_loss = total_loss / max(len(train_dataloader), 1)
        acc = correct / total if total else 0.0
        print(f"Epoch {epoch+1}/{epochs}, Loss: {avg_loss:.4f}, Acc: {acc:.4f}")

# 5. Entry point
def main():
    """End-to-end demo: load ERNIE, build toy data, train, and save weights."""
    # Load the pretrained tokenizer and encoder backbone.
    tokenizer = ErnieTokenizer.from_pretrained('ernie-tiny')
    ernie_model = ErnieModel.from_pretrained('ernie-tiny')

    # Toy corpus — swap in real data for actual use. Labels: 1 = positive,
    # 0 = negative sentiment.
    texts = [
        "这是一个积极的句子，表达喜悦之情。",
        "这是一个消极的句子，表达悲伤的情绪。",
        "今天天气真好，适合出去游玩。",
        "遇到了一些麻烦，心情很糟糕。"
    ]
    labels = [1, 0, 1, 0]

    # Assemble dataset, loader and model.
    dataset = TextDataset(texts, labels, tokenizer)
    dataloader = create_dataloader(dataset, tokenizer, batch_size=2)
    model = TextClassifier(ernie_model, num_classes=2)

    # Train; this demo reuses the training loader as the dev set.
    train(model, dataloader, dataloader)

    # Persist the learned parameters to disk.
    paddle.save(model.state_dict(), 'text_classifier.pdparams')
    print("模型训练完成并保存!")

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()