import torch
import torch.nn as nn
import pandas as pd
from transformers import BertTokenizer
import json
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import matplotlib.pyplot as plt

# 检查 CUDA 可用性
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# TextCNN model definition
class TextCNN(nn.Module):
    """Kim-style text CNN: embedding -> parallel n-gram convs -> max-pool -> linear.

    Args:
        vocab_size: number of rows in the embedding table.
        embedding_dim: embedding width (also the conv kernels' second dimension).
        num_filters: output channels per convolution.
        filter_sizes: n-gram window heights; one Conv2d per entry.
        output_dim: number of target classes.
        dropout: dropout probability applied to the pooled feature vector.
    """

    def __init__(self, vocab_size, embedding_dim, num_filters, filter_sizes, output_dim, dropout):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channels=1, out_channels=num_filters, kernel_size=(fs, embedding_dim))
            for fs in filter_sizes
        ])
        self.fc = nn.Linear(len(filter_sizes) * num_filters, output_dim)
        self.dropout = nn.Dropout(dropout)
        # Largest conv window: any sequence shorter than this would make
        # Conv2d raise, so forward() pads up to it.
        self._min_len = max(filter_sizes)

    def forward(self, text):
        """Return class logits of shape (batch, output_dim) for index tensor `text` (batch, seq_len)."""
        embedded = self.embedding(text)  # (batch, seq_len, embedding_dim)
        # Bug fix: zero-pad sequences shorter than the largest filter size.
        # text_to_indices can produce short or even empty index tensors
        # (it returns an empty tensor on JSON decode failure), which used to
        # crash the convolutions with a kernel-larger-than-input error.
        if embedded.size(1) < self._min_len:
            pad = embedded.new_zeros(embedded.size(0), self._min_len - embedded.size(1), embedded.size(2))
            embedded = torch.cat([embedded, pad], dim=1)
        embedded = embedded.unsqueeze(1)  # add channel dim: (batch, 1, seq, emb)
        conved = [torch.relu(conv(embedded)).squeeze(3) for conv in self.convs]
        pooled = [torch.max(conv, dim=2)[0] for conv in conved]  # max over time
        cat = self.dropout(torch.cat(pooled, dim=1))
        return self.fc(cat)

# Full model definition
class CombinedModel(nn.Module):
    """Two independent TextCNN branches: one classifies user text, one product text."""

    def __init__(self, user_vocab_size, product_vocab_size, embedding_dim=768, num_filters=100, filter_sizes=[3, 4, 5], output_dim=3, dropout=0):
        super().__init__()
        # Both branches share every hyper-parameter except the vocabulary size.
        shared = dict(
            embedding_dim=embedding_dim,
            num_filters=num_filters,
            filter_sizes=filter_sizes,
            output_dim=output_dim,
            dropout=dropout,
        )
        self.user_textcnn = TextCNN(user_vocab_size, **shared)
        self.product_textcnn = TextCNN(product_vocab_size, **shared)

    def forward(self, user_text, product_text):
        """Return a (user_logits, product_logits) pair."""
        return self.user_textcnn(user_text), self.product_textcnn(product_text)

# Load the BERT tokenizer from a local snapshot of bert-base-chinese.
# NOTE(review): hard-coded absolute Windows path ("calssify" looks like a typo
# of "classify") — confirm the location and consider making it configurable.
tokenizer = BertTokenizer.from_pretrained('C:/Users/yuan/PycharmProjects/tokenizer/calssify/bert-base-chinese')

# Build the vocabulary
def build_vocab(texts):
    """Build a token -> index vocabulary from JSON-encoded description strings.

    Each element of *texts* is parsed as JSON; when the result is a dict with a
    'description' key, that value is parsed as a second, nested JSON document.
    All values of the resulting dict are stringified, joined, and tokenized
    with the module-level BERT tokenizer. Index 0 is reserved for "<unk>".

    Args:
        texts: iterable of JSON strings.
    Returns:
        dict mapping token -> int index.
    """
    vocab = {"<unk>": 0}
    index = 1
    for text in texts:
        try:
            data = json.loads(text)
            if isinstance(data, dict) and 'description' in data:
                data = json.loads(data['description'])
            if isinstance(data, dict):
                text_str = ' '.join(str(value) for value in data.values())
            else:
                # Bug fix: the original left text_str unbound here, so a
                # non-dict JSON document either raised an uncaught NameError
                # or silently reused the previous iteration's text.
                text_str = str(data)
            for token in tokenizer.tokenize(text_str):
                if token not in vocab:
                    vocab[token] = index
                    index += 1
        except json.JSONDecodeError:
            # Best-effort behavior kept from the original: report and skip.
            print(f"JSON decoding error for text: {text}")
    return vocab

# Text processing function
def text_to_indices(text, vocab):
    """Convert one JSON-encoded description string to a tensor of vocab indices.

    Mirrors build_vocab's parsing; tokens missing from *vocab* map to
    vocab["<unk>"]. Returns an empty long tensor when the JSON cannot be
    decoded.

    NOTE(review): this unwraps the 'userDescription' key while build_vocab
    unwraps 'description' — confirm the asymmetry is intentional.

    Args:
        text: JSON string to tokenize.
        vocab: token -> index mapping from build_vocab.
    Returns:
        1-D torch.long tensor of token indices on the active device.
    """
    try:
        data = json.loads(text)
        if isinstance(data, dict) and 'userDescription' in data:
            data = json.loads(data['userDescription'])
        if isinstance(data, dict):
            text_str = ' '.join(str(value) for value in data.values())
        else:
            # Bug fix: the original left text_str unbound for non-dict JSON,
            # raising an uncaught NameError (or reusing a stale value).
            text_str = str(data)
        tokens = tokenizer.tokenize(text_str)
        indices = [vocab.get(token, vocab["<unk>"]) for token in tokens]
        return torch.tensor(indices, dtype=torch.long).to(device)
    except json.JSONDecodeError:
        print(f"JSON decoding error for text: {text}")
        return torch.tensor([], dtype=torch.long).to(device)

# Load the train/test CSV splits and extract description texts plus label tensors.
def _label_tensor(frame):
    """Return the 'label' column of *frame* as a long tensor on the active device."""
    return torch.tensor(frame['label'].tolist(), dtype=torch.long).to(device)


train_user_df = pd.read_csv('train_user.csv')
train_product_df = pd.read_csv('train_product.csv')
test_user_df = pd.read_csv('test_user.csv')
test_product_df = pd.read_csv('test_product.csv')

# Training descriptions and labels.
train_user_texts = train_user_df['description'].tolist()
train_user_labels = _label_tensor(train_user_df)
train_product_texts = train_product_df['description'].tolist()
train_product_labels = _label_tensor(train_product_df)

# Test descriptions and labels.
test_user_texts = test_user_df['description'].tolist()
test_user_labels = _label_tensor(test_user_df)
test_product_texts = test_product_df['description'].tolist()
test_product_labels = _label_tensor(test_product_df)

# Vocabularies come from the training descriptions only.
user_vocab = build_vocab(train_user_texts)
product_vocab = build_vocab(train_product_texts)

# Tokenize every split into index tensors (one tensor per row).
train_user_indices = [text_to_indices(t, user_vocab) for t in train_user_texts]
train_product_indices = [text_to_indices(t, product_vocab) for t in train_product_texts]
test_user_indices = [text_to_indices(t, user_vocab) for t in test_user_texts]
test_product_indices = [text_to_indices(t, product_vocab) for t in test_product_texts]

# Model, loss, optimizer, and a step-decay learning-rate schedule.
model = CombinedModel(len(user_vocab), len(product_vocab)).to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

# Per-epoch training losses and per-evaluation metric curves.
user_losses, product_losses = [], []
user_accuracies, user_f1_scores = [], []
product_accuracies, product_f1_scores = [], []
test_epochs = []

# Rows for the final metric tables: [epoch, accuracy, precision, recall, f1].
user_table_data, product_table_data = [], []

# Evaluate once before any training to record the epoch-0 baseline.
model.eval()
user_preds, product_preds = [], []
with torch.no_grad():
    for user_text, user_label, product_text, product_label in zip(
            test_user_indices, test_user_labels, test_product_indices, test_product_labels):
        u_out, p_out = model(user_text.unsqueeze(0), product_text.unsqueeze(0))
        user_preds.append(torch.argmax(u_out, dim=1).item())
        product_preds.append(torch.argmax(p_out, dim=1).item())

u_true = test_user_labels.cpu().numpy()
p_true = test_product_labels.cpu().numpy()

# Weighted averaging across classes; every metric is rounded to 4 decimal
# places before being recorded or printed.
user_accuracy = round(accuracy_score(u_true, user_preds), 4)
user_precision = round(precision_score(u_true, user_preds, average='weighted'), 4)
user_recall = round(recall_score(u_true, user_preds, average='weighted'), 4)
user_f1 = round(f1_score(u_true, user_preds, average='weighted'), 4)

product_accuracy = round(accuracy_score(p_true, product_preds), 4)
product_precision = round(precision_score(p_true, product_preds, average='weighted'), 4)
product_recall = round(recall_score(p_true, product_preds, average='weighted'), 4)
product_f1 = round(f1_score(p_true, product_preds, average='weighted'), 4)

user_accuracies.append(user_accuracy)
user_f1_scores.append(user_f1)
product_accuracies.append(product_accuracy)
product_f1_scores.append(product_f1)
test_epochs.append(0)

user_table_data.append([0, user_accuracy, user_precision, user_recall, user_f1])
product_table_data.append([0, product_accuracy, product_precision, product_recall, product_f1])

print(f'Epoch 0 User Test: Accuracy: {user_accuracy * 100:.2f}%, Precision: {user_precision * 100:.2f}%, Recall: {user_recall * 100:.2f}%, F1: {user_f1 * 100:.2f}%')
print(f'Epoch 0 Product Test: Accuracy: {product_accuracy * 100:.2f}%, Precision: {product_precision * 100:.2f}%, Recall: {product_recall * 100:.2f}%, F1: {product_f1 * 100:.2f}%')

# Train the model, evaluating on the test split every 5 epochs.
num_epochs = 50
for epoch in range(num_epochs):
    model.train()
    epoch_user_loss = 0.0
    epoch_product_loss = 0.0
    # Sample-at-a-time training (batch size 1) over paired user/product rows.
    for u_idx, u_label, p_idx, p_label in zip(
            train_user_indices, train_user_labels, train_product_indices, train_product_labels):
        optimizer.zero_grad()
        u_out, p_out = model(u_idx.unsqueeze(0), p_idx.unsqueeze(0))
        u_loss = criterion(u_out, u_label.unsqueeze(0))
        p_loss = criterion(p_out, p_label.unsqueeze(0))
        # One backward pass over the summed loss updates both branches.
        (u_loss + p_loss).backward()
        optimizer.step()
        epoch_user_loss += u_loss.item()
        epoch_product_loss += p_loss.item()
    scheduler.step()

    user_avg_loss = epoch_user_loss / len(train_user_indices)
    product_avg_loss = epoch_product_loss / len(train_product_indices)
    user_losses.append(user_avg_loss)
    product_losses.append(product_avg_loss)
    print(f'Epoch: {epoch + 1}, User Loss: {user_avg_loss}, Product Loss: {product_avg_loss}')

    # Only evaluate every 5th epoch.
    if (epoch + 1) % 5 != 0:
        continue

    model.eval()
    user_preds, product_preds = [], []
    with torch.no_grad():
        for u_idx, u_label, p_idx, p_label in zip(
                test_user_indices, test_user_labels, test_product_indices, test_product_labels):
            u_out, p_out = model(u_idx.unsqueeze(0), p_idx.unsqueeze(0))
            user_preds.append(torch.argmax(u_out, dim=1).item())
            product_preds.append(torch.argmax(p_out, dim=1).item())

    u_true = test_user_labels.cpu().numpy()
    p_true = test_product_labels.cpu().numpy()

    # Weighted averages, rounded to 4 decimal places as elsewhere.
    user_accuracy = round(accuracy_score(u_true, user_preds), 4)
    user_precision = round(precision_score(u_true, user_preds, average='weighted'), 4)
    user_recall = round(recall_score(u_true, user_preds, average='weighted'), 4)
    user_f1 = round(f1_score(u_true, user_preds, average='weighted'), 4)

    product_accuracy = round(accuracy_score(p_true, product_preds), 4)
    product_precision = round(precision_score(p_true, product_preds, average='weighted'), 4)
    product_recall = round(recall_score(p_true, product_preds, average='weighted'), 4)
    product_f1 = round(f1_score(p_true, product_preds, average='weighted'), 4)

    user_accuracies.append(user_accuracy)
    user_f1_scores.append(user_f1)
    product_accuracies.append(product_accuracy)
    product_f1_scores.append(product_f1)
    test_epochs.append(epoch + 1)

    user_table_data.append([epoch + 1, user_accuracy, user_precision, user_recall, user_f1])
    product_table_data.append([epoch + 1, product_accuracy, product_precision, product_recall, product_f1])

    print(f'Epoch {epoch + 1} User Test: Accuracy: {user_accuracy * 100:.2f}%, Precision: {user_precision * 100:.2f}%, Recall: {user_recall * 100:.2f}%, F1: {user_f1 * 100:.2f}%')
    print(f'Epoch {epoch + 1} Product Test: Accuracy: {product_accuracy * 100:.2f}%, Precision: {product_precision * 100:.2f}%, Recall: {product_recall * 100:.2f}%, F1: {product_f1 * 100:.2f}%')

# Assemble the user/product metric tables, display them, and persist to CSV.
user_columns = ['Epoch', 'User Accuracy', 'User Precision', 'User Recall', 'User F1']
product_columns = ['Epoch', 'Product Accuracy', 'Product Precision', 'Product Recall', 'Product F1']

user_df = pd.DataFrame(user_table_data, columns=user_columns)
product_df = pd.DataFrame(product_table_data, columns=product_columns)

for title, frame in (("User Table:", user_df), ("Product Table:", product_df)):
    print(title)
    print(frame)

user_csv_file_path = 'user_evaluation_metrics.csv'
product_csv_file_path = 'product_evaluation_metrics.csv'
user_df.to_csv(user_csv_file_path, index=False)
product_df.to_csv(product_csv_file_path, index=False)
print(f"User 表格已保存为 {user_csv_file_path}")
print(f"Product 表格已保存为 {product_csv_file_path}")

# Plot the training-loss curves and the periodically-sampled test metrics.
epochs = range(1, num_epochs + 1)

plt.figure(figsize=(12, 8))

# Top panel: per-epoch average losses for both branches.
plt.subplot(2, 1, 1)
for series, label in ((user_losses, 'User Loss'), (product_losses, 'Product Loss')):
    plt.plot(epochs, series, label=label)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('User and Product Loss')
plt.legend()

# Bottom panel: accuracy / F1 at the evaluation epochs (0, 5, 10, ...).
plt.subplot(2, 1, 2)
for series, label in (
        (user_accuracies, 'User Accuracy'),
        (user_f1_scores, 'User F1 Score'),
        (product_accuracies, 'Product Accuracy'),
        (product_f1_scores, 'Product F1 Score')):
    plt.plot(test_epochs, series, label=label)
plt.xlabel('Epoch')
plt.ylabel('Score')
plt.title('Accuracy and F1 Score')
plt.legend()

plt.tight_layout()
plt.show()
