import os
import logging
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import StratifiedKFold
from models.textcnn import build_textcnn_model
import gensim
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import tensorflow as tf
import pickle

# Configure root logging once for the whole script: timestamped INFO messages.
logging.basicConfig(
    format='%(asctime)s %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
)

def train(config):
    """Train TextCNN classifiers with stratified K-fold cross validation.

    Args:
        config: dict providing data/model paths (``train_path``, ``test_path``,
            ``word2vec_path``, ``model_dir``, ``tokenizer_path``) and
            hyper-parameters (``max_len``, ``vector_size``, ``n_splits``,
            ``epochs``, ``batch_size``, optional ``sample_size``).

    Side effects:
        Saves the fitted tokenizer to ``config['tokenizer_path']`` and the
        best checkpoint of each fold to ``config['model_dir']``.
    """
    logging.info("Loading training data...")
    # Load data (tab-separated: 'text' and 'label' columns expected).
    train_df = pd.read_csv(config['train_path'], sep='\t')
    test_df = pd.read_csv(config['test_path'], sep='\t')

    # Optional sub-sampling for quick smoke-test runs.
    if config.get('sample_size'):
        logging.info(f"Sampling {config['sample_size']} examples from training data...")
        train_df = train_df.sample(n=config['sample_size'], random_state=42)
        logging.info(f"Training data shape after sampling: {train_df.shape}")

    logging.info("Preparing tokenizer...")
    # Fit on train + test text so test-only tokens also receive indices.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(pd.concat([train_df['text'], test_df['text']]))

    logging.info("Saving tokenizer...")
    os.makedirs(os.path.dirname(config['tokenizer_path']), exist_ok=True)
    with open(config['tokenizer_path'], 'wb') as f:
        pickle.dump(tokenizer, f)

    logging.info("Loading word2vec model...")
    word2vec = gensim.models.Word2Vec.load(config['word2vec_path'])

    logging.info("Preparing embedding matrix...")
    embeddings_matrix = _build_embedding_matrix(tokenizer, word2vec,
                                                config['vector_size'])

    logging.info("Preparing training data...")
    x_train = tokenizer.texts_to_sequences(train_df['text'])
    logging.info(f"Converting sequences to padded form (max_len={config['max_len']})...")
    x_train = pad_sequences(x_train, maxlen=config['max_len'], padding='post')
    y_train = to_categorical(train_df['label'])

    # Ensure the checkpoint directory exists even when the caller forgot to
    # create it; ModelCheckpoint would otherwise fail on the first save.
    os.makedirs(config['model_dir'], exist_ok=True)

    logging.info(f"Starting {config['n_splits']}-fold cross validation training...")
    skf = StratifiedKFold(n_splits=config['n_splits'], shuffle=True, random_state=42)

    for fold, (train_idx, val_idx) in enumerate(skf.split(x_train, train_df['label']), 1):
        logging.info(f'Training fold {fold}/{config["n_splits"]}')
        logging.info(f'Train size: {len(train_idx)}, Validation size: {len(val_idx)}')

        # Release graph state held by the previous fold's model; without this
        # five models accumulate in memory over the cross-validation loop.
        tf.keras.backend.clear_session()
        model = build_textcnn_model(config['max_len'], embeddings_matrix)
        logging.info(f'Model built. Starting training for {config["epochs"]} epochs...')

        history = model.fit(
            x_train[train_idx],
            y_train[train_idx],
            validation_data=(x_train[val_idx], y_train[val_idx]),
            epochs=config['epochs'],
            batch_size=config['batch_size'],
            callbacks=_make_callbacks(config['model_dir'], fold),
            verbose=1
        )

        # Report the best-epoch validation metrics — the ones ModelCheckpoint
        # actually saved — rather than whatever the last epoch produced.
        best_epoch = int(np.argmax(history.history['val_accuracy']))
        val_loss = history.history['val_loss'][best_epoch]
        val_acc = history.history['val_accuracy'][best_epoch]
        logging.info(f'Fold {fold} best validation loss: {val_loss:.4f}, accuracy: {val_acc:.4f}')


def _build_embedding_matrix(tokenizer, word2vec, vector_size):
    """Map every tokenizer index to its word2vec vector (zero rows for OOV).

    Row 0 stays zero because Keras Tokenizer indices start at 1.
    """
    matrix = np.zeros((len(tokenizer.word_index) + 1, vector_size))
    for word, i in tokenizer.word_index.items():
        if word in word2vec.wv.key_to_index:
            matrix[i] = word2vec.wv[word]
    return matrix


def _make_callbacks(model_dir, fold):
    """Build early-stopping / LR-decay / best-checkpoint callbacks for one fold."""
    return [
        EarlyStopping(
            monitor='val_accuracy',
            patience=3,
            mode='max',
            # Roll the in-memory model back to its best epoch instead of
            # leaving it on post-plateau weights when stopping triggers.
            restore_best_weights=True,
            verbose=1
        ),
        ReduceLROnPlateau(
            monitor='val_accuracy',
            factor=0.5,
            patience=2,
            mode='max',
            verbose=1
        ),
        ModelCheckpoint(
            os.path.join(model_dir, f'model_fold_{fold}.keras'),
            monitor='val_accuracy',
            mode='max',
            save_best_only=True,
            verbose=1
        )
    ]

if __name__ == '__main__':
    # Paths and hyper-parameters for a run; sample_size limits the data
    # so the pipeline can be validated quickly before a full training run.
    config = dict(
        train_path='D:/Code/Python/NLP/textCNN/data/train_set.csv',
        test_path='D:/Code/Python/NLP/textCNN/data/test_a.csv',
        word2vec_path='D:/Code/Python/NLP/textCNN/tmp_data/word2vec.d300.sg.w5.model',
        model_dir='D:/Code/Python/NLP/textCNN/models',
        tokenizer_path='D:/Code/Python/NLP/textCNN/tmp_data/tokenizer.pickle',
        max_len=3000,
        vector_size=300,
        n_splits=5,
        epochs=10,
        batch_size=128,
        sample_size=5000,  # smoke-test on 5000 rows first
    )

    # Make sure the model output directory exists before training starts.
    os.makedirs(config['model_dir'], exist_ok=True)
    train(config)