# -*- coding:utf-8 -*-
import os
import sys
import time
import warnings
import logging
import pandas as pd
import numpy as np
import joblib
from concurrent.futures import ProcessPoolExecutor

# Silence FutureWarnings (pandas/sklearn) so batch logs stay readable.
warnings.filterwarnings("ignore", category=FutureWarning)

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Directory containing this script; data/model/result paths are resolved relative to it.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))


# EnsembleModel class — defined identically to the training script so joblib can unpickle saved ensembles
class EnsembleModel:
    """Soft-voting ensemble of two fitted binary classifiers.

    Attribute names (`model1`, `model2`) must stay unchanged so that
    models pickled by the training script unpickle correctly here.
    """

    def __init__(self, model1, model2):
        self.model1 = model1
        self.model2 = model2

    def predict_proba(self, X):
        """Return the element-wise mean of both members' probability matrices."""
        return (self.model1.predict_proba(X) + self.model2.predict_proba(X)) / 2

    def predict(self, X):
        """Hard labels: 1 where the averaged P(class 1) exceeds 0.5, else 0."""
        positive = self.predict_proba(X)[:, 1]
        return (positive > 0.5).astype(int)


def validate_data_directory():
    """Verify that the raw-data directory and every required CSV exist.

    Returns True when everything is present; otherwise logs what is
    missing and returns False.
    """
    data_dir = os.path.join(PROJECT_ROOT, 'data', 'data_format1')
    required_files = [
        'test_format1.csv',
        'user_info_format1.csv',
        'user_log_format1.csv',
    ]

    # Bail out early when the directory itself is absent.
    if not os.path.exists(data_dir):
        logger.error(f"数据目录不存在: {data_dir}")
        return False

    # Collect the full paths of any required files that are missing.
    missing_files = [
        os.path.join(data_dir, name)
        for name in required_files
        if not os.path.exists(os.path.join(data_dir, name))
    ]

    if missing_files:
        logger.error("以下数据文件缺失:")
        for path in missing_files:
            logger.error(f"  - {path}")
        return False

    logger.info(f"数据目录验证通过: {data_dir}")
    return True


def validate_model_directory():
    """Locate the newest model file and its companion artifacts.

    Returns a 4-tuple ``(model_path, feature_path, feature_selector_path,
    encoder_path)``.  On any fatal problem it returns ``(None, None, None,
    None)`` — four slots, matching the caller's 4-value unpack.
    ``encoder_path`` alone may be None when no label-encoder file exists;
    prediction then skips categorical encoding.
    """
    model_dir = os.path.join(PROJECT_ROOT, 'model')
    feature_selector_path = os.path.join(PROJECT_ROOT, 'feature_selection', 'feature_selector.pkl')
    # BUG FIX: the failure paths previously returned only three Nones while
    # the success path returns four values; the caller unpacks four, so any
    # failure crashed with ValueError instead of exiting gracefully.
    failure = (None, None, None, None)

    # The model directory must exist.
    if not os.path.exists(model_dir):
        logger.error(f"模型目录不存在: {model_dir}")
        return failure

    # The feature selector must exist.
    if not os.path.exists(feature_selector_path):
        logger.error(f"特征选择器不存在: {feature_selector_path}")
        return failure

    # Candidate model files (label-encoder dumps are excluded).
    model_files = [f for f in os.listdir(model_dir) if f.endswith('.joblib') and '_label_encoders' not in f]
    if not model_files:
        logger.error(f"在模型目录中找不到任何模型文件 (.joblib): {model_dir}")
        return failure

    # Lexicographic descending sort picks the latest file — assumes the
    # training script embeds a sortable timestamp in the filename (confirm).
    model_files.sort(reverse=True)
    latest_model = model_files[0]
    model_path = os.path.join(model_dir, latest_model)

    if not os.path.exists(model_path):
        logger.error(f"模型文件不存在: {model_path}")
        return failure

    # The companion feature list is derived from the model filename.
    feature_file = latest_model.replace('.joblib', '_features.txt')
    feature_path = os.path.join(model_dir, feature_file)

    if not os.path.exists(feature_path):
        # Fall back to any feature file that shares the model's filename stem.
        alt_feature_files = [f for f in os.listdir(model_dir)
                             if f.startswith(latest_model.split('.')[0]) and f.endswith('_features.txt')]

        if alt_feature_files:
            feature_path = os.path.join(model_dir, alt_feature_files[0])
            logger.info(f"找到备选特征文件: {feature_path}")
        else:
            logger.error(f"特征文件不存在: {feature_path}")
            return failure

    # Label encoders are optional: warn and continue when absent.
    encoder_file = latest_model.replace('.joblib', '_label_encoders.joblib')
    encoder_path = os.path.join(model_dir, encoder_file)

    if not os.path.exists(encoder_path):
        logger.warning(f"标签编码器文件不存在: {encoder_path}")
        logger.warning("预测时将不会进行分类特征编码转换")
        encoder_path = None
    else:
        logger.info(f"找到标签编码器文件: {encoder_path}")

    logger.info(f"找到模型文件: {model_path}")
    logger.info(f"找到特征文件: {feature_path}")
    return model_path, feature_path, feature_selector_path, encoder_path


def load_features(feature_path):
    """Read the model's feature list, one feature name per line.

    Opens the file explicitly as UTF-8 and skips blank lines, so a
    trailing newline or stray empty line cannot produce an empty-string
    "feature" that would break column selection later.
    """
    with open(feature_path, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f if line.strip()]


def encode_categorical_features(data, encoders, features):
    """Apply the training-time encoders to the categorical columns.

    Operates on a copy of ``data``.  Any column whose values cannot be
    transformed (e.g. categories unseen at training time) is set
    wholesale to the sentinel -1 instead of aborting the pipeline.
    """
    result = data.copy()

    for col in features:
        # Only columns with a fitted encoder are touched.
        if col not in encoders:
            continue

        # Encoders were fitted on string values, so coerce first.
        result[col] = result[col].astype(str)

        try:
            # The encoder expects a 2-D frame; flatten back to one column.
            result[col] = encoders[col].transform(result[[col]]).flatten()
            logger.debug(f"成功编码特征: {col}")
        except Exception as e:
            logger.warning(f"编码特征 {col} 时出错: {e}")
            # Transformation failed — fall back to -1 for the whole column.
            result[col] = -1

    return result


def predict_chunk(args):
    """Worker entry point: score one slice of the test data.

    ``args`` is a single ``(model, data_chunk, features)`` tuple so the
    function can be mapped directly over a ProcessPoolExecutor.
    Returns the model's predict_proba output for the selected columns.
    """
    model, chunk, feature_names = args
    subset = chunk[feature_names]
    return model.predict_proba(subset)


def main():
    """End-to-end inference pipeline.

    Validates data/model directories, loads the model and its companion
    artifacts, encodes categorical features, predicts repurchase
    probabilities in parallel, and writes a timestamped submission CSV
    under ``<root>/result/``.  All failures are logged; the function
    always returns None.
    """
    try:
        # Validate the data directory before doing any work.
        if not validate_data_directory():
            logger.error("数据目录验证失败，程序终止")
            return

        # Validate the model directory — expected to yield 4 values.
        model_path, feature_path, feature_selector_path, encoder_path = validate_model_directory()
        if not model_path or not feature_path or not feature_selector_path:
            logger.error("模型文件验证失败，程序终止")
            return

        # Load the feature selector — assumed to be a dict holding a
        # 'top_features' list (TODO confirm against the training script).
        feature_selector = joblib.load(feature_selector_path)
        top_features = feature_selector['top_features']
        logger.info(f"加载特征选择器，选择{len(top_features)}个特征")

        # Load the exact feature list the model was trained on.
        model_features = load_features(feature_path)
        logger.info(f"模型使用{len(model_features)}个特征")

        # Load the label encoders when available; absence is tolerated.
        encoders = None
        if encoder_path:
            try:
                encoders = joblib.load(encoder_path)
                logger.info(f"成功加载标签编码器，包含{len(encoders)}个编码器")
            except Exception as e:
                logger.error(f"加载标签编码器时出错: {e}")
                logger.warning("预测时将不会进行分类特征编码转换")
                encoders = None

        # Dynamically import the project-local dataset module.
        sys.path.insert(0, PROJECT_ROOT)
        import dataset as ds

        logger.info("加载测试数据集...")
        test_data, info_df = ds.load_test()
        logger.info(f"数据集加载完成，形状: {test_data.shape}")

        if test_data.empty:
            logger.error("加载的测试数据集为空")
            return

        logger.info("填充缺失值...")
        test_data.fillna(0, inplace=True)
        logger.info("填充完成")

        # Identify categorical features: prefer the encoders' keys,
        # otherwise fall back to dtype-based inference.
        categorical_features = []
        if encoders:
            categorical_features = list(encoders.keys())
            logger.info(f"识别出{len(categorical_features)}个分类特征")
        else:
            # No encoders — infer categorical columns from object dtype.
            categorical_features = [col for col in test_data.columns if test_data[col].dtype == 'object']
            logger.warning(f"没有找到标签编码器，将使用数据类型推断分类特征: {len(categorical_features)}个")

        # Ensure every model feature exists; absent columns are zero-filled.
        missing_features = [f for f in model_features if f not in test_data.columns]
        if missing_features:
            logger.warning(f"测试数据中缺失{len(missing_features)}个特征，将填充0")
            for f in missing_features:
                test_data[f] = 0

        # Restrict to (and order by) the model's feature list.
        test_data = test_data[model_features]
        logger.info(f"特征选择后数据集形状: {test_data.shape}")

        # Encode categorical features with the training-time encoders.
        if encoders and categorical_features:
            logger.info("开始对分类特征进行编码...")
            test_data = encode_categorical_features(test_data, encoders, categorical_features)
            logger.info(f"分类特征编码完成: {len(categorical_features)}个特征")
        else:
            logger.warning("跳过分类特征编码，可能导致预测错误")

        logger.info("加载模型...")
        model = joblib.load(model_path)
        logger.info(f"模型加载成功: {os.path.basename(model_path)}")

        # Multiprocess prediction.
        # NOTE(review): os.cpu_count() may return None, which would make
        # min() raise TypeError — consider `os.cpu_count() or 1`.
        logger.info("进行多进程预测...")
        num_chunks = min(8, os.cpu_count())  # chunk count bounded by CPU core count
        chunks = np.array_split(test_data, num_chunks)

        start_time = time.time()
        with ProcessPoolExecutor(max_workers=num_chunks) as executor:
            # Each worker receives a pickled copy of the model; memory cost
            # scales with num_chunks.
            args = [(model, chunk, model_features) for chunk in chunks]
            results = list(executor.map(predict_chunk, args))

        # Stack per-chunk probability arrays; keep P(class 1) as the score.
        y_pred = np.vstack(results)
        ans = y_pred[:, 1]
        prediction_time = time.time() - start_time
        logger.info(f"预测完成，耗时: {prediction_time:.2f}秒")

        logger.info("生成答案文件...")
        # Assumes info_df rows align 1:1 with test_data rows — TODO confirm
        # against dataset.load_test.
        info_df['prob'] = ans

        # Timestamped result filename under <root>/result/.
        ans_name = 'ans_' + time.strftime('%Y%m%d_%H%M%S', time.localtime())
        result_dir = os.path.join(PROJECT_ROOT, 'result')
        os.makedirs(result_dir, exist_ok=True)
        output_path = os.path.join(result_dir, ans_name + '.csv')

        logger.info(f"保存结果到: {output_path}")
        info_df.to_csv(output_path, index=False)
        logger.info("保存完成")
        logger.info("程序执行成功")

    except Exception as e:
        # Top-level boundary: log with traceback and exit cleanly.
        logger.error(f"发生错误: {e}", exc_info=True)
        logger.error("程序执行失败")


if __name__ == '__main__':
    # Script entry point: log the runtime context, run the pipeline, exit.
    logger.info(f"项目根目录: {PROJECT_ROOT}")
    logger.info("启动天猫复购预测程序")
    main()
    logger.info("程序结束")
