import torch
import torch.nn as nn
from transformers import BertTokenizer, BertModel

# TextCNN sentence classifier.
class TextCNN(nn.Module):
    """Embedding -> parallel 2-D convolutions -> max-over-time pooling -> linear head.

    Each conv window spans the full embedding dimension, so every filter
    produces one scalar per n-gram position; max pooling keeps the strongest
    activation per filter.
    """

    def __init__(self, vocab_size, embedding_dim, num_filters, filter_sizes, output_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        # One convolution per window width in filter_sizes.
        self.convs = nn.ModuleList(
            nn.Conv2d(in_channels=1, out_channels=num_filters, kernel_size=(width, embedding_dim))
            for width in filter_sizes
        )
        self.fc = nn.Linear(num_filters * len(filter_sizes), output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        # (batch, seq) -> (batch, 1, seq, emb): treat the sequence as a 1-channel image.
        x = self.embedding(text).unsqueeze(1)
        pooled = []
        for conv in self.convs:
            fmap = torch.relu(conv(x)).squeeze(3)   # (batch, filters, seq - width + 1)
            pooled.append(fmap.max(dim=2).values)   # max over time -> (batch, filters)
        features = self.dropout(torch.cat(pooled, dim=1))
        return self.fc(features)


# Combined model: two independent TextCNN branches, one per input stream.
class CombinedModel(nn.Module):
    """Run user text and product text through separate TextCNN classifiers.

    The two branches share no parameters; forward returns one logit tensor
    per stream, each of shape (batch, output_dim).
    """

    # filter_sizes default is a tuple, not a list: a mutable default argument
    # is shared across calls and is a classic Python pitfall.
    def __init__(self, user_vocab_size, product_vocab_size, embedding_dim=768,
                 num_filters=100, filter_sizes=(3, 4, 5), output_dim=10, dropout=0.5):
        super().__init__()
        self.user_textcnn = TextCNN(user_vocab_size, embedding_dim, num_filters, filter_sizes, output_dim, dropout)
        self.product_textcnn = TextCNN(product_vocab_size, embedding_dim, num_filters, filter_sizes, output_dim, dropout)

    def forward(self, user_text, product_text):
        """Return (user_logits, product_logits)."""
        user_output = self.user_textcnn(user_text)
        product_output = self.product_textcnn(product_text)
        return user_output, product_output


# Toy data: four short texts per stream with alternating binary labels.
user_texts = ["user text example 1", "user text example 2", "user text example 3", "user text example 4"]
product_texts = ["product text example 1", "product text example 2", "product text example 3", "product text example 4"]
# Labels as torch.long, as required for nn.CrossEntropyLoss targets.
user_labels = torch.tensor([0, 1, 0, 1], dtype=torch.long)
product_labels = torch.tensor([1, 0, 1, 0], dtype=torch.long)

# BERT tokenizer, used here only to turn text into token ids
# (downloads the vocabulary on first use).
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')


# Tokenization helper used when building the vocabularies.
def yield_tokens(data_iter):
    """Yield the BERT token-id sequence for each text in data_iter (lazy)."""
    yield from map(tokenizer.encode, data_iter)


def build_vocab_manually(data_iter, specials=("<unk>",)):
    """Build a token -> contiguous-index mapping from an iterable of token lists.

    Args:
        data_iter: iterable of token sequences (e.g. lists of BERT token ids).
        specials: special tokens placed first, in order, starting at index 0.
            Default is a tuple rather than a list — a mutable default argument
            would be shared across calls, a classic Python pitfall.

    Returns:
        dict mapping each special and each distinct token to a unique index,
        in first-seen order.
    """
    word_to_index = {s: i for i, s in enumerate(specials)}
    for tokens in data_iter:
        for token in tokens:
            if token not in word_to_index:
                # Next free index is simply the current size of the mapping.
                word_to_index[token] = len(word_to_index)
    return word_to_index

# Build a separate vocabulary (BERT token id -> local embedding index) per stream.
user_word_to_index = build_vocab_manually(yield_tokens(user_texts))
user_vocab_size = len(user_word_to_index)
product_word_to_index = build_vocab_manually(yield_tokens(product_texts))
product_vocab_size = len(product_word_to_index)


# Data-processing function: text -> tensor of local vocabulary indices.
def text_pipeline(text, word_to_index):
    """Tokenize `text` with the module-level BERT tokenizer and map each token
    to its index in `word_to_index`.

    Tokens missing from the vocabulary fall back to the "<unk>" index
    (the original code raised KeyError on any out-of-vocabulary token,
    even though "<unk>" is reserved at index 0 by build_vocab_manually).

    Returns a 1-D torch.long tensor, as required by nn.Embedding.
    """
    tokens = tokenizer.encode(text)
    unk_index = word_to_index.get("<unk>", 0)
    return torch.tensor(
        [word_to_index.get(token, unk_index) for token in tokens],
        dtype=torch.long,
    )


# Build the datasets as plain (tensor, label) pairs
# (simplified: no torchtext structures).
user_data = [(text_pipeline(text, user_word_to_index), label) for text, label in zip(user_texts, user_labels)]
product_data = [(text_pipeline(text, product_word_to_index), label) for text, label in zip(product_texts, product_labels)]

# Simple 80/20 train/test split; with 4 samples this is 3 train / 1 test.
split_idx_user = int(len(user_data) * 0.8)
user_train = user_data[:split_idx_user]
user_test = user_data[split_idx_user:]

split_idx_product = int(len(product_data) * 0.8)
product_train = product_data[:split_idx_product]
product_test = product_data[split_idx_product:]

# Print a sample and its label as a sanity check.
print("User data sample:", user_data[0])
print("Product data sample:", product_data[0])

# Initialize model, loss function and optimizer.
model = CombinedModel(user_vocab_size, product_vocab_size)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0001)
# StepLR: multiply the learning rate by 0.1 every 10 epochs.
# NOTE(review): with 100 epochs this drives the LR to ~1e-13 by the end —
# confirm the decay schedule is intentional.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# Training loop: one optimizer step per paired (user, product) sample
# (effective batch size 1), summing the two branch losses.
num_epochs = 100
for epoch in range(num_epochs):
    model.train()
    user_train_loss = 0
    product_train_loss = 0

    for user_sample, product_sample in zip(user_train, product_train):
        user_text, user_label = user_sample
        product_text, product_label = product_sample

        user_text = user_text.unsqueeze(0)  # add batch dimension
        product_text = product_text.unsqueeze(0)  # add batch dimension

        optimizer.zero_grad()

        user_output, product_output = model(user_text, product_text)

        # Labels are 0-dim tensors (from iterating a 1-D tensor);
        # unsqueeze to shape (1,) to match the (1, output_dim) logits.
        user_loss = criterion(user_output, user_label.unsqueeze(0))
        product_loss = criterion(product_output, product_label.unsqueeze(0))

        # Joint objective: both branches are updated from one backward pass.
        loss = user_loss + product_loss
        loss.backward()
        optimizer.step()

        user_train_loss += user_loss.item()
        product_train_loss += product_loss.item()

    # Advance the LR schedule once per epoch.
    scheduler.step()

    print(f'Epoch: {epoch + 1}, User Loss: {user_train_loss / len(user_train)}, Product Loss: {product_train_loss / len(product_train)}')


# Evaluate the model: accumulate per-branch loss and accuracy on the test split.
model.eval()
user_test_loss = 0
product_test_loss = 0
user_correct = 0
product_correct = 0
total_user = 0
total_product = 0

with torch.no_grad():
    for user_sample, product_sample in zip(user_test, product_test):
        user_text, user_label = user_sample
        product_text, product_label = product_sample

        user_text = user_text.unsqueeze(0)  # add batch dimension
        product_text = product_text.unsqueeze(0)  # add batch dimension

        user_output, product_output = model(user_text, product_text)

        user_loss = criterion(user_output, user_label.unsqueeze(0))  # add batch dimension
        product_loss = criterion(product_output, product_label.unsqueeze(0))  # add batch dimension

        user_test_loss += user_loss.item()
        product_test_loss += product_loss.item()

        # Predicted class = argmax over the logit dimension.
        user_predicted = torch.argmax(user_output, dim=1)
        product_predicted = torch.argmax(product_output, dim=1)

        # Handle both scalar (0-dim) and batched labels when counting hits.
        if user_label.dim() == 0:
            total_user += 1
            user_correct += (user_predicted == user_label).item()
        else:
            total_user += user_label.size(0)
            user_correct += (user_predicted == user_label).sum().item()

        if product_label.dim() == 0:
            total_product += 1
            product_correct += (product_predicted == product_label).item()
        else:
            total_product += product_label.size(0)
            product_correct += (product_predicted == product_label).sum().item()

print(f'User Test Loss: {user_test_loss / len(user_test)}')
print(f'User Accuracy: {100 * user_correct / total_user}%')
print(f'Product Test Loss: {product_test_loss / len(product_test)}')
print(f'Product Accuracy: {100 * product_correct / total_product}%')