import torch
import torch.nn as nn
import pandas as pd
from transformers import RobertaTokenizer, RobertaModel
from torch.utils.data import Dataset, DataLoader
from datetime import datetime
import os
from tqdm import tqdm

# Ensure the output directory exists
def ensure_result_dir():
    """Ensure the "result" output directory exists and return its path.

    Returns:
        str: the directory path ("result"), created if it was missing.
    """
    result_dir = "result"
    # exist_ok avoids the TOCTOU race between an exists() check and makedirs().
    os.makedirs(result_dir, exist_ok=True)
    return result_dir

# Load the pretrained RoBERTa tokenizer and encoder
def load_roberta(model_name="roberta-base"):
    """Load a pretrained RoBERTa tokenizer and base encoder.

    Args:
        model_name: Hugging Face model id or local path (default "roberta-base").

    Returns:
        tuple: (RobertaTokenizer, RobertaModel) for *model_name*.

    Note:
        The legacy ``mirror="tuna"`` argument was dropped: the ``mirror``
        parameter was deprecated and removed from ``from_pretrained`` in
        modern transformers releases. To use a download mirror, set the
        ``HF_ENDPOINT`` environment variable instead.
    """
    tokenizer = RobertaTokenizer.from_pretrained(model_name)
    model = RobertaModel.from_pretrained(model_name)
    return tokenizer, model

# Domain-name classifier
class DomainAPTClassifier(nn.Module):
    """Domain classifier built on a pretrained RoBERTa encoder.

    The first-token ([CLS]) representation of the encoder output is passed
    through dropout and a linear head to produce per-class logits.
    """

    def __init__(self, num_classes):
        super(DomainAPTClassifier, self).__init__()
        # Only the encoder is needed here; the tokenizer returned alongside
        # it is discarded (callers tokenize separately).
        _, self.roberta = load_roberta()
        hidden_size = self.roberta.config.hidden_size
        self.fc = nn.Linear(hidden_size, num_classes)
        self.dropout = nn.Dropout(0.5)

    def forward(self, input_ids, attention_mask):
        """Return logits of shape (batch, num_classes) for a token batch."""
        encoded = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
        pooled = encoded.last_hidden_state[:, 0, :]
        return self.fc(self.dropout(pooled))

# Custom dataset of domain names
class DomainDataset(Dataset):
    """Dataset of domain-name strings read from a one-column CSV file.

    Each item is tokenized on the fly into fixed-length ``input_ids`` and
    ``attention_mask`` tensors suitable for a RoBERTa encoder.
    """

    def __init__(self, file_path, tokenizer, max_length=32):
        # The first row of the file is treated as a header and skipped.
        self.data = pd.read_csv(file_path, names=["domain"], skiprows=1)
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        text = self.data.iloc[idx, 0]
        enc = self.tokenizer(
            text,
            padding='max_length',
            truncation=True,
            max_length=self.max_length,
            return_tensors='pt',
        )
        # Drop the leading batch dimension added by return_tensors='pt'.
        return {key: enc[key].squeeze(0) for key in ('input_ids', 'attention_mask')}

# Inference function
def predict(model, predict_loader, device):
    """Run inference over a DataLoader and return per-sample class indices.

    Args:
        model: classifier producing logits of shape (batch, num_classes).
        predict_loader: DataLoader yielding dicts with 'input_ids' and
            'attention_mask' tensors.
        device: torch device to run inference on.

    Returns:
        list: predicted class index (numpy scalar) per sample, in input order.
    """
    model.to(device)
    model.eval()
    results = []

    # Gradients are unnecessary for inference; no_grad saves memory and time.
    with torch.no_grad():
        for batch in tqdm(predict_loader, desc="Predicting"):
            ids = batch['input_ids'].to(device)
            mask = batch['attention_mask'].to(device)
            logits = model(ids, mask)
            results.extend(logits.argmax(dim=1).cpu().numpy())

    return results

# Main entry: load the previously trained model and run predictions
def run_predict():
    """Load the saved RoBERTa classifier checkpoint and predict labels.

    Reads domains from data/0408predict_test.csv (relative to this script),
    writes one CSV of per-domain predictions into the result directory, and
    prints the count of each predicted label. Returns early (None) if the
    checkpoint file is missing.
    """
    from collections import Counter

    tokenizer, _ = load_roberta()
    script_dir = os.path.dirname(os.path.abspath(__file__))
    predict_csv_path = os.path.join(script_dir, "data", "0408predict_test.csv")
    predict_dataset = DomainDataset(predict_csv_path, tokenizer)

    # shuffle=False keeps predictions aligned with the input row order.
    predict_loader = DataLoader(predict_dataset, batch_size=16, shuffle=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    num_classes = 2
    model = DomainAPTClassifier(num_classes)

    # Load the previously trained weights; bail out if the checkpoint is absent.
    # NOTE(review): this path is relative to the working directory (matching
    # ensure_result_dir), while the data path is script-relative — confirm the
    # script is always run from its own directory.
    checkpoint_path = os.path.join("result", "roberta_model_2025-04-22-13-30-19.pth")
    if not os.path.exists(checkpoint_path):
        print(f"模型路径不存在: {checkpoint_path}")
        return
    model.load_state_dict(torch.load(checkpoint_path, map_location=device, weights_only=True))
    print(f"成功加载模型参数: {checkpoint_path}")

    predictions = predict(model, predict_loader, device)

    # Save predictions next to the input domains. Reuse the dataframe the
    # dataset already loaded instead of re-reading the CSV from disk.
    result_dir = ensure_result_dir()
    current_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    result_path = os.path.join(result_dir, f"predict_result_{current_time}.csv")
    predict_data = predict_dataset.data.copy()
    predict_data['prediction'] = predictions
    predict_data.to_csv(result_path, index=False)
    print(f"预测结果已保存到 {result_path}")

    # Count how many samples were assigned each label (single pass).
    label_counts = Counter(int(pred) for pred in predictions)
    print(f"标签 0 的数量: {label_counts[0]}")
    print(f"标签 1 的数量: {label_counts[1]}")

# Script entry point: run inference only when executed directly, not on import.
if __name__ == "__main__":
    run_predict()