import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from models import BertTextCNN
from dataset_preprocessing import PredictionDataset
import pandas as pd
import numpy as np
import argparse
import os
import logging
import time
import gc

# 设置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("prediction.log"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

def predict(args):
    """Run inference with a trained BertTextCNN model and save predictions.

    Loads the test set, restores model weights from ``args.model_path``,
    sums the per-segment logits for each document, takes the argmax over
    the summed logits as the document label, and writes a one-column
    ``label`` CSV to ``args.output_file``.

    Args:
        args: Parsed command-line namespace (see ``__main__``) carrying the
            data/model paths and the hyper-parameters used at training time.
    """
    # Inference is pinned to CPU in this script.
    device = torch.device("cpu")
    logger.info("使用设备: %s", device)

    # Build the evaluation dataset / loader.
    logger.info("加载测试数据...")
    test_dataset = PredictionDataset(
        args.test_file,
        max_length=args.max_length,
        max_segments=args.max_segments,
    )
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size)

    # Rebuild the architecture, then restore the trained weights.
    logger.info("加载BertTextCNN模型...")
    model = BertTextCNN(
        bert_model_path=args.bert_model_path,
        num_labels=args.num_labels,
        filter_sizes=tuple(map(int, args.filter_sizes.split(','))),
        num_filters=args.num_filters,
    )
    model.load_state_dict(torch.load(args.model_path, map_location=device))
    model.to(device)
    model.eval()

    logger.info("开始预测...")
    all_predictions = []

    with torch.no_grad():
        for batch_idx, (segments, actual_segments) in enumerate(test_loader):
            batch_size = segments.size(0)
            # Accumulator for summed per-segment logits, one row per document.
            all_logits = torch.zeros(batch_size, args.num_labels, device=device)

            for i in range(batch_size):
                n_valid = actual_segments[i].item()
                # Feed each real (non-padding) segment separately to bound
                # peak memory. Under no_grad, intermediate tensors are freed
                # immediately by reference counting — the original per-segment
                # gc.collect() was a full-heap sweep per segment and made
                # inference pathologically slow for zero benefit.
                # NOTE: a document with zero valid segments keeps all-zero
                # logits and therefore defaults to label 0.
                for segment in segments[i, :n_valid]:
                    segment_logits = model(segment.unsqueeze(0).to(device))
                    all_logits[i] += segment_logits.squeeze(0)

            # Document label = argmax over the summed segment logits.
            predictions = torch.argmax(all_logits, dim=1)
            all_predictions.extend(predictions.cpu().numpy())

            if (batch_idx + 1) % args.log_interval == 0:
                # Lazy %-formatting: args are only rendered if the record is emitted.
                logger.info("已处理 %d/%d 批次", batch_idx + 1, len(test_loader))

    # Persist predictions as a single "label" column.
    logger.info("保存预测结果...")
    result_df = pd.DataFrame({"label": all_predictions})
    result_df.to_csv(args.output_file, index=False)
    logger.info("预测完成，结果已保存到 %s", args.output_file)

if __name__ == "__main__":
    # CLI entry point: declare every flag from a spec table, parse, and run.
    parser = argparse.ArgumentParser(description="使用BertTextCNN模型进行文本分类预测")
    _arg_specs = [
        ("--test_file", dict(type=str, default="test_a.csv", help="测试数据文件路径")),
        ("--output_file", dict(type=str, default="prediction_result.csv", help="预测结果输出文件路径")),
        ("--bert_model_path", dict(type=str, required=True, help="预训练BERT模型路径")),
        ("--model_path", dict(type=str, required=True, help="BertTextCNN模型路径")),
        ("--max_length", dict(type=int, default=1024, help="最大序列长度")),
        ("--max_segments", dict(type=int, default=20, help="每篇文章的最大分段数")),
        ("--batch_size", dict(type=int, default=1, help="批量大小")),
        ("--num_labels", dict(type=int, default=14, help="分类标签数量")),
        ("--filter_sizes", dict(type=str, default="2,3,4", help="卷积核大小，用逗号分隔")),
        ("--num_filters", dict(type=int, default=50, help="每种卷积核的数量")),
        ("--log_interval", dict(type=int, default=5, help="日志打印间隔（批次数）")),
    ]
    for _flag, _opts in _arg_specs:
        parser.add_argument(_flag, **_opts)

    predict(parser.parse_args())