import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from transformers import AutoModel, AutoTokenizer


class TextClassifier(nn.Module):
    """Transformer encoder with a pluggable classification head.

    Supports a trainable linear head, a small CNN head, or non-differentiable
    scikit-learn heads (LDA / SVM) that must be fitted separately via ``fit``.
    """

    def __init__(self, model_name, num_classes, classifier_type='linear'):
        """
        Args:
            model_name: Hugging Face model identifier passed to ``AutoModel``.
            num_classes: number of output classes.
            classifier_type: one of 'linear', 'lda', 'svm', 'cnn'
                (case-insensitive).

        Raises:
            ValueError: if ``classifier_type`` is not one of the above.
        """
        super(TextClassifier, self).__init__()
        self.model_name = model_name
        self.num_classes = num_classes
        self.classifier_type = classifier_type.lower()
        self.model = AutoModel.from_pretrained(model_name)

        # BUG FIX: compare the normalized (lowercased) type, matching what
        # forward() checks; previously 'LDA', 'Linear', etc. raised ValueError
        # even though the lowercased value was stored and used everywhere else.
        if self.classifier_type == 'linear':
            self.classifier = nn.Linear(self.model.config.hidden_size, num_classes)
        elif self.classifier_type == 'lda':
            self.classifier = LDA()
        elif self.classifier_type == 'svm':
            # probability=True enables predict_proba, though forward() below
            # currently uses hard predict().
            self.classifier = SVC(probability=True)
        elif self.classifier_type == 'cnn':
            self.conv1 = nn.Conv1d(self.model.config.hidden_size, 128, kernel_size=3, padding=1)
            self.conv2 = nn.Conv1d(128, 64, kernel_size=3, padding=1)
            self.fc = nn.Linear(64, num_classes)
        else:
            raise ValueError("Unsupported classifier type. Choose from 'linear', 'lda', 'svm', 'cnn'.")

    def forward(self, input_ids, attention_mask):
        """Encode a batch and apply the selected classification head.

        Note: for 'lda'/'svm' the returned tensor holds hard class labels
        (the output of sklearn ``predict``), not logits, and is detached from
        the autograd graph — it cannot be used with ``F.cross_entropy``.
        """
        outputs = self.model(input_ids, attention_mask=attention_mask)
        # NOTE(review): assumes the backbone exposes pooler_output; some
        # AutoModel architectures return None here or lack the attribute —
        # confirm for the chosen model_name.
        pooled_output = outputs.pooler_output  # [batch_size, hidden_size]

        if self.classifier_type == 'linear':
            logits = self.classifier(pooled_output)
        elif self.classifier_type in ['lda', 'svm']:
            # sklearn heads work on numpy arrays outside the autograd graph.
            pooled_output = pooled_output.detach().cpu().numpy()
            logits = self.classifier.predict(pooled_output)
            logits = torch.tensor(logits, device=input_ids.device)
        elif self.classifier_type == 'cnn':
            # Treat the pooled vector as a length-1 sequence of channels.
            pooled_output = pooled_output.unsqueeze(2)  # [batch_size, hidden_size, 1]
            x = F.relu(self.conv1(pooled_output))  # [batch_size, 128, 1]
            x = F.relu(self.conv2(x))  # [batch_size, 64, 1]
            x = x.view(x.size(0), -1)  # Flatten to [batch_size, 64]
            logits = self.fc(x)
        return logits

    def fit(self, X_train, y_train):
        """Fit the sklearn head (no-op for torch-based heads).

        Args:
            X_train: 2-D feature array (e.g. pooled encoder outputs).
            y_train: class labels.
        """
        if self.classifier_type in ['lda', 'svm']:
            self.classifier.fit(X_train, y_train)

    def predict(self, X_test):
        """Predict class labels with the sklearn head.

        Raises:
            ValueError: for torch-based heads ('linear', 'cnn') — use
                ``forward`` for those instead.
        """
        if self.classifier_type in ['lda', 'svm']:
            return self.classifier.predict(X_test)
        else:
            raise ValueError("predict method is only supported for 'lda' and 'svm' classifiers.")


# Dataset definition
class TextDataset(torch.utils.data.Dataset):
    """Map-style dataset over pre-tokenized inputs, masks, and labels.

    All three sequences are expected to be index-aligned and of equal length.
    """

    def __init__(self, input_ids, attention_masks, labels):
        self.input_ids = input_ids
        self.attention_masks = attention_masks
        self.labels = labels

    def __len__(self):
        # Length is defined by the input_ids sequence.
        return len(self.input_ids)

    def __getitem__(self, idx):
        # Return one example as the dict shape expected by train()/evaluate().
        sample = {
            'input_ids': self.input_ids[idx],
            'attention_mask': self.attention_masks[idx],
            'label': self.labels[idx],
        }
        return sample


# Training loop
def train(model, dataloader, optimizer, device):
    """Run one training epoch and return the average batch loss.

    Args:
        model: module called as ``model(input_ids, attention_mask=...)``
            returning logits of shape [batch, num_classes].
        dataloader: iterable of dicts with 'input_ids', 'attention_mask',
            'label' tensors.
        optimizer: torch optimizer over ``model``'s parameters.
        device: device to move each batch to.

    Returns:
        float: average cross-entropy loss over the epoch (0.0 if the
        dataloader is empty — previously this raised ZeroDivisionError).
    """
    model.train()
    total_loss = 0.0
    num_batches = 0
    for batch in dataloader:
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['label'].to(device)

        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask=attention_mask)
        loss = F.cross_entropy(outputs, labels)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        num_batches += 1

    # Guard against an empty dataloader instead of dividing by zero.
    avg_loss = total_loss / num_batches if num_batches else 0.0
    print(f"Training Loss: {avg_loss}")
    # Return the metric so callers can log/track it instead of parsing stdout.
    return avg_loss


# Evaluation loop
def evaluate(model, dataloader, device):
    """Evaluate ``model`` on ``dataloader`` and return top-1 accuracy.

    Args:
        model: module called as ``model(input_ids, attention_mask=...)``
            returning logits of shape [batch, num_classes].
        dataloader: DataLoader-like iterable of batch dicts; must expose
            ``.dataset`` with a defined ``len()``.
        device: device to move each batch to.

    Returns:
        float: fraction of correctly classified examples (0.0 for an empty
        dataset — previously this raised ZeroDivisionError).
    """
    model.eval()
    total_correct = 0
    # Inference only: disable gradient tracking for speed and memory.
    with torch.no_grad():
        for batch in dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['label'].to(device)

            outputs = model(input_ids, attention_mask=attention_mask)
            _, predicted = torch.max(outputs, dim=1)
            total_correct += (predicted == labels).sum().item()

    num_examples = len(dataloader.dataset)
    # Guard against an empty dataset instead of dividing by zero.
    accuracy = total_correct / num_examples if num_examples else 0.0
    print(f"Evaluation Accuracy: {accuracy:.4f}")
    # Return the metric so callers can log/track it instead of parsing stdout.
    return accuracy
