import torch
import torch.nn as nn
from gensim.models import Word2Vec
import numpy as np

class TextCNN(nn.Module):
    """Text classifier: three parallel 1-D convolutions (kernel sizes 3/4/5)
    over the embedded token sequence, global-max-pooled, concatenated, and
    projected to class logits.

    Args:
        vocab_size: size of the embedding vocabulary (used only when no
            pretrained embeddings are supplied).
        embed_dim: embedding dimensionality.
        num_classes: number of output classes.
        pretrained_embeddings: optional (vocab, embed_dim) FloatTensor used
            to initialize the embedding layer; it stays trainable
            (``freeze=False``).
        num_filters: output channels per convolution branch. Defaults to 128,
            matching the previously hard-coded value, so existing callers are
            unaffected.
    """

    def __init__(self, vocab_size, embed_dim, num_classes,
                 pretrained_embeddings=None, num_filters=128):
        super(TextCNN, self).__init__()

        # Embedding layer: load pretrained vectors if given, else train from
        # scratch. Pretrained vectors are fine-tuned, not frozen.
        if pretrained_embeddings is not None:
            self.embedding = nn.Embedding.from_pretrained(
                pretrained_embeddings,
                freeze=False
            )
        else:
            self.embedding = nn.Embedding(vocab_size, embed_dim)

        # Three convolution branches with different receptive fields
        # (3-, 4-, and 5-gram features).
        self.conv1 = nn.Conv1d(embed_dim, num_filters, 3)
        self.conv2 = nn.Conv1d(embed_dim, num_filters, 4)
        self.conv3 = nn.Conv1d(embed_dim, num_filters, 5)

        self.pool = nn.AdaptiveMaxPool1d(1)
        self.dropout = nn.Dropout(0.5)
        # Concatenation yields 3 * num_filters features; this was previously
        # hard-coded as 384 (= 3 * 128), which would silently break if the
        # conv channel count changed.
        self.fc = nn.Linear(3 * num_filters, num_classes)

    def forward(self, x):
        """Forward pass.

        Args:
            x: (batch, seq_len) LongTensor of token ids; seq_len must be at
               least 5 (the largest kernel size).

        Returns:
            (batch, num_classes) logits tensor.
        """
        x = self.embedding(x)      # (batch, seq_len, embed_dim)
        x = x.transpose(1, 2)      # Conv1d wants (batch, channels, seq_len)

        x1 = torch.relu(self.conv1(x))
        x2 = torch.relu(self.conv2(x))
        x3 = torch.relu(self.conv3(x))

        # Global max pool collapses each branch to one value per channel.
        x1 = self.pool(x1).squeeze(-1)
        x2 = self.pool(x2).squeeze(-1)
        x3 = self.pool(x3).squeeze(-1)

        x = torch.cat([x1, x2, x3], dim=1)
        x = self.dropout(x)
        x = self.fc(x)

        return x

def train_word2vec(texts, embed_size=128, vocab_size=100000, batch_size=500):
    """Incrementally train a Word2Vec model on batches of tokenized texts
    and return a dense embedding matrix.

    Args:
        texts: sequence of tokenized texts (each text an iterable of tokens;
            tokens are coerced to ``str``).
        embed_size: dimensionality of the learned word vectors.
        vocab_size: number of rows in the returned matrix. Row ``j`` is
            filled from ``model.wv[str(j)]`` — this assumes tokens are
            stringified vocabulary indices; TODO confirm against the caller.
        batch_size: texts per incremental training batch (previously this
            parameter was accepted but ignored in favor of a hard-coded 500;
            the default keeps the old behavior).

    Returns:
        torch.FloatTensor of shape (vocab_size, embed_size). Indices absent
        from the trained vocabulary are initialized from N(0, 0.1).
    """
    # Hoisted from the training loop: the old code imported gc inside the
    # loop body, so the gc.collect() in the embedding loop below raised
    # NameError whenever the training loop never executed (empty texts).
    import gc

    print("开始处理文本数据...")

    # Create the Word2Vec model (vocabulary is built incrementally below).
    print("初始化Word2Vec模型...")
    model = Word2Vec(vector_size=embed_size, window=5, min_count=1, workers=4)

    # Seed the vocabulary from a small initial slice so that subsequent
    # build_vocab(update=True) calls are valid.
    print("构建初始词汇表...")
    initial_size = min(1000, len(texts))
    initial_batch = texts[:initial_size]
    initial_tokens = [[str(token) for token in text] for text in initial_batch]
    model.build_vocab(initial_tokens)

    total_samples = len(texts)
    print("开始分批训练...")
    train_batch_size = batch_size

    for start_idx in range(0, total_samples, train_batch_size):
        try:
            end_idx = min(start_idx + train_batch_size, total_samples)
            batch_texts = texts[start_idx:end_idx]
            batch_tokens = [[str(token) for token in text] for text in batch_texts]
            # Grow the vocabulary with any new tokens, then train one epoch
            # on this batch only.
            model.build_vocab(batch_tokens, update=True)
            model.train(batch_tokens, total_examples=len(batch_tokens), epochs=1)

            # Drop the batch eagerly to keep peak memory low.
            del batch_tokens
            gc.collect()

            if (start_idx + train_batch_size) % 10000 == 0:
                print(f"已处理 {start_idx + train_batch_size}/{total_samples} 个样本")

        except MemoryError:
            # Best-effort: skip this batch and move on (the batch size is
            # not actually reduced).
            print(f"在处理样本 {start_idx} 时遇到内存错误，尝试减小批次大小...")
            continue

    # Assemble the embedding matrix row by row, falling back to random
    # initialization for out-of-vocabulary indices.
    print("创建词嵌入矩阵...")
    embedding_matrix = np.zeros((vocab_size, embed_size))
    embed_batch_size = 100

    for i in range(0, vocab_size, embed_batch_size):
        try:
            end_i = min(i + embed_batch_size, vocab_size)
            for j in range(i, end_i):
                try:
                    embedding_matrix[j] = model.wv[str(j)]
                except KeyError:
                    embedding_matrix[j] = np.random.normal(0, 0.1, embed_size)

            if (i + embed_batch_size) % 1000 == 0:
                print(f"已处理 {i + embed_batch_size}/{vocab_size} 个词向量")
                gc.collect()

        except MemoryError:
            # Out of memory while filling rows: randomize the remainder and
            # stop rather than crash.
            print(f"在处理词向量 {i} 时遇到内存错误，使用随机初始化...")
            remaining = vocab_size - i
            embedding_matrix[i:] = np.random.normal(0, 0.1, (remaining, embed_size))
            break

    print("Word2Vec训练完成")
    return torch.FloatTensor(embedding_matrix)
