import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
import math


# ========================
# 1. Positional encoding (PositionalEncoding)
# ========================
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Precomputes a [1, max_len, d_model] table of sin/cos values and adds the
    first seq_len rows to the input embeddings on each forward pass. The table
    is registered as a buffer so it follows the module across devices but is
    never trained.
    """

    def __init__(self, d_model: int, max_len: int = 5000):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency progression: 10000^(-2i / d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        # Fix: for odd d_model there are only d_model // 2 cosine slots — one
        # fewer than len(div_term) — so the unsliced assignment crashed with a
        # shape mismatch. The slice is a no-op when d_model is even.
        pe[:, 1::2] = torch.cos(position * div_term[:d_model // 2])
        pe = pe.unsqueeze(0)  # [1, max_len, d_model] so it broadcasts over batch
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to x of shape [batch, seq_len, d_model]."""
        x = x + self.pe[:, :x.size(1), :]
        return x


# ========================
# 2. Simplified Transformer model
# ========================
class SimpleTransformer(nn.Module):
    """Minimal Transformer-encoder text classifier.

    Token embeddings plus sinusoidal positional encodings are passed through a
    stack of Transformer encoder layers; the output at position 0 serves as the
    sentence representation (a [CLS]-like token) for a linear classifier head.
    """

    def __init__(self, vocab_size: int, embed_dim: int, num_classes: int,
                 num_heads: int = 4, num_layers: int = 2, dropout: float = 0.1):
        super(SimpleTransformer, self).__init__()
        # Fix: padding_idx=0 pins the <PAD> embedding at zero and keeps it out
        # of gradient updates (id 0 is the pad/fallback id in this script).
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.pos_encoder = PositionalEncoding(embed_dim)

        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_dim,
            nhead=num_heads,  # embed_dim must be divisible by num_heads
            dim_feedforward=256,
            dropout=dropout
        )
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)

        self.classifier = nn.Linear(embed_dim, num_classes)

    def forward(self, src):
        # src: [batch_size, seq_len] of token ids, where 0 marks padding.
        # Fix: mask <PAD> positions so self-attention ignores them; previously
        # padding tokens participated in attention, letting sequence length
        # leak into the representation. NOTE(review): id 0 is also the OOV
        # fallback in this script, so unknown words get masked as well.
        pad_mask = src.eq(0)
        # Never mask position 0 (used as the sentence token below) — a row
        # with every key masked would make attention softmax produce NaNs.
        pad_mask[:, 0] = False

        x = self.embedding(src)  # [B, S, E]
        x = self.pos_encoder(x)
        x = x.transpose(0, 1)  # [S, B, E] (encoder default is batch_first=False)
        x = self.transformer_encoder(x, src_key_padding_mask=pad_mask)
        x = x[0, :, :]  # take position-0 output as the sentence representation
        logits = self.classifier(x)
        return logits


# ========================
# 3. Custom dataset class
# ========================
class TextDataset(Dataset):
    """Map-style dataset turning whitespace-tokenized strings into
    fixed-length id tensors.

    Unknown words map to id 0, and sequences are truncated or zero-padded
    to exactly ``max_seq_len`` tokens.
    """

    def __init__(self, texts, labels, word_to_idx, max_seq_len=20):
        self.texts = texts
        self.labels = labels
        self.word_to_idx = word_to_idx
        self.max_seq_len = max_seq_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        lookup = self.word_to_idx.get
        words = self.texts[idx].split()[:self.max_seq_len]
        ids = [lookup(word, 0) for word in words]
        # Zero-pad up to the fixed length (truncation happened above).
        ids.extend([0] * (self.max_seq_len - len(ids)))
        return torch.tensor(ids), torch.tensor(self.labels[idx])


# ========================
# 4. Text preprocessing helper
# ========================
def preprocess_text(text, word_to_idx, max_seq_len=20):
    """Convert one text into a [1, max_seq_len] tensor of token ids.

    Unknown words map to id 0; the sequence is truncated or zero-padded
    to exactly ``max_seq_len``, and a leading batch dimension is added.
    """
    words = text.split()
    ids = [word_to_idx.get(word, 0) for word in words[:max_seq_len]]
    while len(ids) < max_seq_len:
        ids.append(0)  # pad with the reserved id 0
    return torch.tensor(ids).unsqueeze(0)  # add batch dimension


# ========================
# 5. Prediction functions
# ========================
def predict_text(model, text, word_to_idx, label_encoder, device, max_seq_len=20):
    """Classify a single text.

    Returns a tuple of (decoded label, confidence of the predicted class,
    full per-class probability vector as a numpy array).
    """
    model.eval()
    with torch.no_grad():
        # Tokenize/pad and move onto the model's device.
        batch = preprocess_text(text, word_to_idx, max_seq_len).to(device)

        # Forward pass; softmax over the class dimension.
        logits = model(batch)
        probs = torch.softmax(logits, dim=1)
        cls_idx = torch.argmax(logits, dim=1).item()
        conf = probs[0][cls_idx].item()

        # Map the class index back to the original label value.
        label = label_encoder.inverse_transform([cls_idx])[0]

        return label, conf, probs[0].cpu().numpy()


def predict_batch(model, texts, word_to_idx, label_encoder, device, max_seq_len=20, batch_size=32):
    """Classify many texts in mini-batches.

    Returns (predicted labels, confidences), each aligned with ``texts``.
    """
    model.eval()
    all_labels = []
    all_confs = []

    with torch.no_grad():
        for start in range(0, len(texts), batch_size):
            chunk = texts[start:start + batch_size]

            # Tokenize, truncate, and zero-pad every text in the chunk.
            encoded = []
            for sentence in chunk:
                ids = [word_to_idx.get(tok, 0) for tok in sentence.split()[:max_seq_len]]
                ids.extend([0] * (max_seq_len - len(ids)))
                encoded.append(ids)

            batch = torch.tensor(encoded).to(device)

            # Forward pass; argmax for the class, max-probability for confidence.
            logits = model(batch)
            probs = torch.softmax(logits, dim=1)
            top_idx = torch.argmax(logits, dim=1).cpu().numpy()
            top_conf = torch.max(probs, dim=1)[0].cpu().numpy()

            # Map class indices back to the original label values.
            all_labels.extend(label_encoder.inverse_transform(top_idx))
            all_confs.extend(top_conf)

    return all_labels, all_confs


# ========================
# 6. Data preparation
# ========================
# Build a small synthetic sentiment dataset (3 sentences repeated 1000x).
texts = ["I love programming", "Python is great", "I do not like bugs"] * 1000
labels = [1, 1, 0] * 1000  # 1: positive, 0: negative

# Build the vocabulary; index 0 is reserved for <PAD>
# (and is also the fallback id for unknown words at inference time).
word_to_idx = {"<PAD>": 0}
for text in texts:
    for word in text.split():
        if word not in word_to_idx:
            word_to_idx[word] = len(word_to_idx)

# Encode labels as consecutive integers 0..n_classes-1.
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(labels)

# Train/validation split (fixed seed for reproducibility).
train_texts, val_texts, train_labels, val_labels = train_test_split(texts, encoded_labels, test_size=0.2,
                                                                    random_state=42)

# Build the DataLoaders (training set shuffled each epoch).
max_seq_len = 20
batch_size = 32

train_dataset = TextDataset(train_texts, train_labels, word_to_idx, max_seq_len)
val_dataset = TextDataset(val_texts, val_labels, word_to_idx, max_seq_len)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

# ========================
# 7. Model, loss function, and optimizer setup
# ========================
vocab_size = len(word_to_idx)
embed_dim = 64
num_classes = len(np.unique(encoded_labels))  # 2 for this binary task

model = SimpleTransformer(
    vocab_size=vocab_size,
    embed_dim=embed_dim,
    num_classes=num_classes,
    num_heads=4,  # embed_dim (64) must be divisible by num_heads
    num_layers=2
)

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)


# ========================
# 8. Training and validation functions
# ========================
def train_model(model, criterion, optimizer, train_loader, val_loader, num_epochs=5):
    """Run the training loop, validating after every epoch.

    Prints the mean loss over each window of 50 batches and checkpoints
    the weights to "best_model.pth" whenever validation accuracy improves.
    Relies on the module-level ``device``.
    """
    best_val_acc = 0.0
    for epoch in range(num_epochs):
        # --- training phase ---
        model.train()
        running_loss = 0.0
        correct, total = 0, 0

        for i, (inputs, targets) in enumerate(train_loader):
            inputs, targets = inputs.to(device), targets.to(device)

            optimizer.zero_grad()
            logits = model(inputs)
            loss = criterion(logits, targets)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            predictions = logits.argmax(dim=1)
            total += targets.size(0)
            correct += (predictions == targets).sum().item()

            # Report (and reset) the windowed loss every 50 batches.
            if i % 50 == 49:
                print(f'[Epoch {epoch + 1}, Batch {i + 1}] Loss: {running_loss / 50:.3f}')
                running_loss = 0.0

        # --- validation phase ---
        model.eval()
        val_correct, val_total = 0, 0
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                predictions = model(inputs).argmax(dim=1)
                val_total += targets.size(0)
                val_correct += (predictions == targets).sum().item()

        val_acc = 100 * val_correct / val_total
        print(f'Epoch {epoch + 1} finished | Val Accuracy: {val_acc:.2f}%')
        # Keep only the best-performing weights on disk.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), "best_model.pth")

    print("Training complete.")
    print(f"Best Validation Accuracy: {best_val_acc:.2f}%")


# ========================
# 9. Load the trained model
# ========================
def load_trained_model(model_path, model, device):
    """Load saved weights from ``model_path`` into ``model`` (in place).

    Switches the model to eval mode on success. Returns True on success,
    False when the file is missing or loading fails for any other reason.
    """
    try:
        # NOTE(review): torch.load on untrusted files is unsafe without
        # weights_only=True — confirm checkpoints come from a trusted source.
        state = torch.load(model_path, map_location=device)
        model.load_state_dict(state)
    except FileNotFoundError:
        print(f"模型文件未找到: {model_path}")
        return False
    except Exception as e:
        # Broad catch at this boundary: any load failure means "not loaded".
        print(f"加载模型时出错: {e}")
        return False
    model.eval()
    print(f"成功加载模型: {model_path}")
    return True


# ========================
# 10. Main program
# ========================
if __name__ == "__main__":
    # Train the model (runs every time this script is executed).
    print("开始训练模型...")
    train_model(model, criterion, optimizer, train_loader, val_loader, num_epochs=5)
    
    # Load the best checkpoint saved during training for inference.
    print("\n加载训练好的模型进行预测...")
    model_loaded = load_trained_model("best_model.pth", model, device)
    
    if model_loaded:
        # Single-text prediction demo.
        test_texts = [
            "I love Python programming",
            "This is terrible and awful",
            "Python is amazing",
            "I hate bugs in my code",
            "Programming is fun"
        ]
        
        print("\n=== 单个文本预测结果 ===")
        for text in test_texts:
            predicted_label, confidence, probabilities = predict_text(
                model, text, word_to_idx, label_encoder, device, max_seq_len
            )
            
            # Label 1 = positive ("正面"), 0 = negative ("负面").
            sentiment = "正面" if predicted_label == 1 else "负面"
            print(f"文本: '{text}'")
            print(f"预测: {sentiment} (置信度: {confidence:.3f})")
            print(f"概率分布: 负面={probabilities[0]:.3f}, 正面={probabilities[1]:.3f}")
            print("-" * 50)
        
        # Batched prediction demo over the same texts.
        print("\n=== 批量预测结果 ===")
        batch_predictions, batch_confidences = predict_batch(
            model, test_texts, word_to_idx, label_encoder, device, max_seq_len
        )
        
        for i, (text, pred, conf) in enumerate(zip(test_texts, batch_predictions, batch_confidences)):
            sentiment = "正面" if pred == 1 else "负面"
            print(f"{i+1}. '{text}' -> {sentiment} (置信度: {conf:.3f})")
    
    else:
        print("无法加载模型，请先训练模型")