import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle
import os

def predict(config):
    """Run k-fold ensemble inference on the test set and write predicted labels.

    Soft-votes: averages class-probability outputs of the per-fold models,
    then takes the argmax as the final label.

    Args:
        config: dict with keys:
            'tokenizer_path': path to a pickled Keras Tokenizer fitted at train time.
            'test_path': tab-separated CSV containing a 'text' column.
            'max_len': length sequences are padded/truncated to (post-padding).
            'model_dir': directory holding ``model_fold_{k}.keras`` files, k = 1..n_splits.
            'n_splits': number of CV folds / saved models to average.
            'output_path': destination CSV path (single 'label' column, no index).
            'n_classes': optional number of output classes (default 14, matching
                the original hard-coded value).
    """
    # Load the fitted tokenizer; it must be the same one used during training
    # so token ids line up with the models' embedding layers.
    with open(config['tokenizer_path'], 'rb') as f:
        tokenizer = pickle.load(f)

    # Load and vectorize the test data.
    test_df = pd.read_csv(config['test_path'], sep='\t')
    x_test = tokenizer.texts_to_sequences(test_df['text'])
    x_test = pad_sequences(x_test, maxlen=config['max_len'], padding='post')

    # Accumulate the mean of per-class probabilities across all fold models.
    # n_classes was hard-coded to 14; make it configurable but keep the default.
    n_classes = config.get('n_classes', 14)
    predictions = np.zeros((len(test_df), n_classes))
    for fold in range(config['n_splits']):
        model = load_model(f"{config['model_dir']}/model_fold_{fold+1}.keras")
        predictions += model.predict(x_test) / config['n_splits']

    # argmax over classes -> final label; write a single-column CSV.
    test_df['label'] = np.argmax(predictions, axis=1)
    test_df[['label']].to_csv(config['output_path'], index=False)

if __name__ == '__main__':
    # All project assets live under this root directory.
    project_root = 'D:/Code/Python/NLP/textCNN'

    config = {
        'test_path': f'{project_root}/data/test_a.csv',
        'tokenizer_path': f'{project_root}/tmp_data/tokenizer.pickle',
        'model_dir': f'{project_root}/models',
        'output_path': f'{project_root}/results/predictions.csv',
        'max_len': 3000,
        'n_splits': 5,
    }

    # Create the results directory if it does not already exist.
    os.makedirs(os.path.dirname(config['output_path']), exist_ok=True)

    predict(config)