import re
from collections import Counter

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader


# Hyperparameter configuration.
# NOTE: plain class attributes are read both on the class (Config.lr) and
# through an instance (NanoTransformer(Config())) below.
class Config:
    vocab_size = 1000  # vocabulary size (includes <PAD> and <UNK>)
    max_len = 32  # maximum sequence length (tokens)
    d_model = 64  # model / embedding dimension
    n_heads = 4  # number of attention heads
    n_layers = 2  # number of Transformer blocks
    dropout = 0.1  # dropout probability
    batch_size = 8  # batch size
    lr = 0.001  # learning rate
    epochs = 10  # number of training epochs


# 1. Tiny toy dataset for sentiment analysis (six hand-written reviews).
texts = [
    "I love this movie! It's great.",
    "Terrible waste of time. Horrible!",
    "The acting was superb and plot amazing.",
    "Boring and pointless. I hated it.",
    "Best film I've seen this year!",
    "Don't bother watching. Complete trash."
]
labels = [1, 0, 1, 0, 1, 0]  # 1 = positive, 0 = negative


# 2. Text preprocessing
class Tokenizer:
    """Frequency-based word tokenizer with reserved <PAD>=0 and <UNK>=1 ids.

    The vocabulary keeps the ``vocab_size - 2`` most frequent words of the
    corpus; everything else maps to <UNK>.
    """

    PAD_ID = 0  # padding token id
    UNK_ID = 1  # out-of-vocabulary token id

    def __init__(self, vocab_size, corpus=None):
        """Build the vocabulary immediately.

        Args:
            vocab_size: total vocabulary size, including the 2 special tokens.
            corpus: iterable of strings to build the vocabulary from.
                Defaults to the module-level ``texts`` for backward
                compatibility with the original global-based implementation.
        """
        self.vocab_size = vocab_size
        self.corpus = list(corpus) if corpus is not None else texts
        self.word2idx = {}
        self.idx2word = {}
        self.build_vocab()

    def build_vocab(self):
        """Populate word2idx/idx2word from corpus word frequencies."""
        counts = Counter()
        for text in self.corpus:
            counts.update(re.findall(r'\w+', text.lower()))

        # Reserve ids 0/1 for the special tokens, then assign ids to the
        # top vocab_size - 2 words by frequency. Counter.most_common sorts
        # stably, so frequency ties keep first-seen corpus order, matching
        # the previous sorted(..., reverse=True) behavior.
        self.word2idx = {'<PAD>': self.PAD_ID, '<UNK>': self.UNK_ID}
        for idx, (word, _) in enumerate(counts.most_common(self.vocab_size - 2)):
            self.word2idx[word] = idx + 2

        self.idx2word = {idx: word for word, idx in self.word2idx.items()}

    def encode(self, text):
        """Return the list of token ids for ``text``; unknown words -> <UNK>."""
        tokens = re.findall(r'\w+', text.lower())
        return [self.word2idx.get(token, self.UNK_ID) for token in tokens]


# Build the shared tokenizer once over the module-level corpus `texts`.
tokenizer = Tokenizer(Config.vocab_size)


# 3. Dataset class
class TextDataset(Dataset):
    """Map-style dataset pairing raw texts with integer labels.

    Each item is a (token_ids, label) pair of tensors, with token ids
    truncated or right-padded with 0 (<PAD>) to exactly ``max_len``.
    """

    def __init__(self, texts, labels, tokenizer, max_len):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts[idx]
        label = self.labels[idx]

        # BUGFIX: use the tokenizer handed to __init__; the original code
        # silently reached for the module-level `tokenizer` global, making
        # the constructor argument dead.
        token_ids = self.tokenizer.encode(text)
        # Truncate to max_len, then right-pad with 0 (<PAD>).
        token_ids = token_ids[:self.max_len]
        token_ids = token_ids + [0] * (self.max_len - len(token_ids))

        return torch.tensor(token_ids), torch.tensor(label)


# Wrap the toy corpus; shuffle=True reshuffles the 6 samples each epoch.
dataset = TextDataset(texts, labels, tokenizer, Config.max_len)
dataloader = DataLoader(dataset, batch_size=Config.batch_size, shuffle=True)


# 4. 定义袖珍Transformer模型
class TransformerBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.d_model)
        self.attn = nn.MultiheadAttention(
            embed_dim=config.d_model,
            num_heads=config.n_heads,
            dropout=config.dropout,
            batch_first=True
        )
        self.ln2 = nn.LayerNorm(config.d_model)
        self.ff = nn.Sequential(
            nn.Linear(config.d_model, 4 * config.d_model),
            nn.GELU(),
            nn.Linear(4 * config.d_model, config.d_model),
            nn.Dropout(config.dropout)
        )

    def forward(self, x):
        attn_output, _ = self.attn(x, x, x)
        x = x + attn_output
        x = self.ln1(x)
        ff_output = self.ff(x)
        x = x + ff_output
        return self.ln2(x)


class NanoTransformer(nn.Module):
    """Tiny Transformer encoder for 2-class sequence classification."""

    def __init__(self, config):
        super().__init__()
        # Token embeddings plus learned absolute positional embeddings.
        self.embedding = nn.Embedding(config.vocab_size, config.d_model)
        self.pos_embedding = nn.Embedding(config.max_len, config.d_model)
        # Encoder blocks applied back-to-back.
        blocks = [TransformerBlock(config) for _ in range(config.n_layers)]
        self.transformer_blocks = nn.Sequential(*blocks)
        # Classification head over mean-pooled features.
        self.classifier = nn.Sequential(
            nn.Linear(config.d_model, config.d_model),
            nn.Tanh(),
            nn.Linear(config.d_model, 2)
        )

    def forward(self, x):
        """Map (batch, seq) token ids to (batch, 2) class logits."""
        seq_len = x.size(1)
        pos_ids = torch.arange(seq_len, device=x.device).unsqueeze(0)
        hidden = self.embedding(x) + self.pos_embedding(pos_ids)
        hidden = self.transformer_blocks(hidden)
        pooled = hidden.mean(dim=1)  # average pooling over sequence positions
        return self.classifier(pooled)


# 5. Training setup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = NanoTransformer(Config()).to(device)
criterion = nn.CrossEntropyLoss()  # takes raw logits + integer class labels
optimizer = optim.Adam(model.parameters(), lr=Config.lr)

# 6. Training loop
print("开始训练袖珍模型...")
for epoch in range(Config.epochs):
    total_loss = 0
    # NOTE(review): the loop variable `labels` shadows the module-level
    # `labels` list; harmless here because the dataset was built above.
    for inputs, labels in dataloader:
        inputs, labels = inputs.to(device), labels.to(device)

        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    # Report the mean per-batch loss for this epoch.
    print(f"Epoch {epoch + 1}/{Config.epochs} | Loss: {total_loss / len(dataloader):.4f}")


# 7. Inference helper
def predict_sentiment(text):
    """Return the model's probability (0-1) that `text` is positive."""
    model.eval()
    with torch.no_grad():
        # Truncate to max_len, then right-pad with 0 (<PAD>).
        token_ids = tokenizer.encode(text)[:Config.max_len]
        padding = [0] * (Config.max_len - len(token_ids))
        batch = torch.tensor([token_ids + padding]).to(device)

        logits = model(batch)
        probs = torch.softmax(logits, dim=1)[0]
        return probs[1].item()  # probability of the positive class


# Demo: run the trained model on a few unseen sentences.
test_texts = [
    "This was an absolutely wonderful performance!",
    "I've never seen something so bad in my life.",
    "Mediocre at best, but had some good moments"
]

print("\n情感分析测试:")
for text in test_texts:
    prob = predict_sentiment(text)
    # Display is truncated to 40 characters per text.
    print(f"文本: {text[:40]}{'...' if len(text) > 40 else ''}")
    print(f"→ 正面概率: {prob:.4f} | 情感: {'正面' if prob > 0.5 else '负面'}")
    print("-" * 60)