import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import warnings
# NOTE(review): blanket suppression hides deprecation/runtime warnings from
# pandas/sklearn/torch; consider narrowing to specific categories.
warnings.filterwarnings('ignore')

# 1. Data preprocessing
class TextDataset(Dataset):
    """Wrap tokenized texts (lists of int token ids) as a torch Dataset.

    Every item is truncated or right-padded with 0 to exactly ``max_len``
    tokens. With labels, items are ``(text_tensor, label_tensor)`` pairs;
    without labels (inference mode), items are the text tensor alone.
    """

    def __init__(self, texts, labels=None, max_len=100):
        self.texts = texts
        self.labels = labels
        self.max_len = max_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        # Truncate first, then right-pad with 0 up to the fixed length.
        tokens = list(self.texts[idx][:self.max_len])
        tokens.extend([0] * (self.max_len - len(tokens)))
        sample = torch.tensor(tokens, dtype=torch.long)

        if self.labels is None:
            return sample
        return sample, torch.tensor(self.labels[idx], dtype=torch.long)

# 2. Model definition
class TextCNN(nn.Module):
    """TextCNN classifier: three parallel 1-D convolutions (kernel sizes
    3/4/5) over the embedded token sequence, global max pooling per branch,
    concatenation, dropout, and a linear classification head.

    Args:
        vocab_size: number of rows in the embedding table (token-id range).
        embed_dim: embedding dimension.
        num_classes: number of output classes.
        num_filters: output channels per convolution branch. Defaults to 128,
            matching the previously hard-coded value, so existing callers
            are unaffected.
    """

    def __init__(self, vocab_size, embed_dim, num_classes, num_filters=128):
        super().__init__()

        self.embedding = nn.Embedding(vocab_size, embed_dim)

        # Three branches with different receptive fields (3/4/5-gram features).
        self.conv1 = nn.Conv1d(embed_dim, num_filters, 3)
        self.conv2 = nn.Conv1d(embed_dim, num_filters, 4)
        self.conv3 = nn.Conv1d(embed_dim, num_filters, 5)

        self.pool = nn.AdaptiveMaxPool1d(1)
        self.dropout = nn.Dropout(0.5)
        # Head input derived from the branch count instead of hard-coded 384.
        self.fc = nn.Linear(3 * num_filters, num_classes)

    def forward(self, x):
        """Compute class logits.

        Args:
            x: LongTensor of token ids, shape [batch_size, seq_len].
               seq_len must be >= 5 (the largest kernel size).

        Returns:
            FloatTensor of logits, shape [batch_size, num_classes].
        """
        x = self.embedding(x)   # [batch_size, seq_len, embed_dim]
        x = x.transpose(1, 2)   # [batch_size, embed_dim, seq_len] for Conv1d

        x1 = torch.relu(self.conv1(x))
        x2 = torch.relu(self.conv2(x))
        x3 = torch.relu(self.conv3(x))

        # Global max pool: one feature per channel per branch.
        x1 = self.pool(x1).squeeze(-1)
        x2 = self.pool(x2).squeeze(-1)
        x3 = self.pool(x3).squeeze(-1)

        x = torch.cat([x1, x2, x3], dim=1)  # [batch_size, 3 * num_filters]
        x = self.dropout(x)
        return self.fc(x)

# 3. Training function
def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=5):
    """Train ``model`` for ``num_epochs`` epochs.

    After every epoch, prints the average training loss and the macro-F1
    score on the validation set. Moves the model to GPU when available and
    trains it in place; returns None.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    for epoch in range(num_epochs):
        # --- training pass ---
        model.train()
        running_loss = 0.0
        for texts, labels in train_loader:
            texts = texts.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            loss = criterion(model(texts), labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        # --- validation pass ---
        model.eval()
        epoch_preds = []
        epoch_labels = []
        with torch.no_grad():
            for texts, labels in val_loader:
                logits = model(texts.to(device))
                epoch_preds.extend(torch.argmax(logits, dim=1).cpu().numpy())
                epoch_labels.extend(labels.numpy())

        val_f1 = f1_score(epoch_labels, epoch_preds, average='macro')
        print(f'Epoch {epoch+1}/{num_epochs}:')
        print(f'Average Loss: {running_loss/len(train_loader):.4f}')
        print(f'Validation F1: {val_f1:.4f}\n')

# 4. Main program
def main(train_path='D:/deep-learning-course-project-master/train_set.csv',
         test_path='D:/deep-learning-course-project-master/test_a.csv',
         output_path='submit_textcnn.csv'):
    """Run the full pipeline: load data, train a TextCNN, write predictions.

    The previously hard-coded file locations are now keyword parameters
    (defaults unchanged), so the function still works with no arguments.

    Args:
        train_path: TSV with a 'text' column of space-separated token ids
            and an integer 'label' column.
        test_path: TSV with a 'text' column only.
        output_path: destination CSV with a single 'label' column.
    """
    print("读取数据...")
    train_df = pd.read_csv(train_path, sep='\t')
    test_df = pd.read_csv(test_path, sep='\t')

    def process_text(text):
        # "12 34 56" -> [12, 34, 56]
        return [int(x) for x in text.split()]

    train_texts = train_df['text'].apply(process_text).values
    train_labels = train_df['label'].values
    test_texts = test_df['text'].apply(process_text).values

    # Hold out 20% of the training data for validation.
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        train_texts, train_labels, test_size=0.2, random_state=42
    )

    print("准备数据集...")
    train_dataset = TextDataset(train_texts, train_labels)
    val_dataset = TextDataset(val_texts, val_labels)
    test_dataset = TextDataset(test_texts)

    train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=128)
    test_loader = DataLoader(test_dataset, batch_size=128)

    print("初始化模型...")
    vocab_size = 100000  # adjust to the actual vocabulary size
    embed_dim = 128
    num_classes = 14

    model = TextCNN(vocab_size, embed_dim, num_classes)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    print("开始训练...")
    # train_model moves the model to the chosen device in place.
    train_model(model, train_loader, val_loader, criterion, optimizer)

    print("预测测试集...")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.eval()
    test_preds = []
    with torch.no_grad():
        for batch_texts in test_loader:
            batch_texts = batch_texts.to(device)
            outputs = model(batch_texts)
            test_preds.extend(torch.argmax(outputs, dim=1).cpu().numpy())

    print("保存结果...")
    submit_df = pd.DataFrame({'label': test_preds})
    submit_df.to_csv(output_path, index=False)
    print("完成！")

if __name__ == "__main__":
    main()
