#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
对话短文本语义匹配
"""

import sys
import pandas as pd
import numpy as np
from pathlib import Path
import subprocess
import time
import logging
import argparse
import os

# Logging configuration: INFO-level messages with timestamp/level prefix,
# emitted through a stream handler (stderr by default).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
# Module-level logger shared by every function in this script.
logger = logging.getLogger(__name__)

# Verify third-party dependencies, offering to auto-install missing ones.
def check_dependencies():
    """Verify required packages are importable; offer to pip-install missing ones.

    Exits the process when installation is declined, fails, or succeeds
    (a restart is required so the freshly installed packages can be imported).
    Returns normally only when every dependency is already available.
    """
    # Maps importable module name -> pip distribution name.
    required_packages = {
        "sklearn": "scikit-learn",
        "xgboost": "xgboost",
        "joblib": "joblib"
    }

    def _importable(module):
        """Return True when *module* can be imported."""
        try:
            __import__(module)
        except ImportError:
            return False
        return True

    missing_packages = [pkg for mod, pkg in required_packages.items()
                        if not _importable(mod)]
    if not missing_packages:
        return

    logger.warning(f"⚠️ 检测到缺少以下依赖: {', '.join(missing_packages)}")
    install = input("是否自动安装这些依赖? (y/n): ").strip().lower()

    if install != 'y':
        # User declined: print manual install hints and abort.
        logger.error("❌ 请手动安装以下依赖后再运行程序:")
        for package in missing_packages:
            logger.info(f"pip install {package}")
        sys.exit(1)

    logger.info("🔄 正在安装依赖...")
    for package in missing_packages:
        logger.info(f"安装 {package}...")
        try:
            subprocess.check_call([sys.executable, "-m", "pip", "install", package])
            logger.info(f"✅ {package} 安装成功")
        except subprocess.CalledProcessError:
            logger.error(f"❌ {package} 安装失败")
            logger.info(f"请手动安装: pip install {package}")
            sys.exit(1)
    logger.info("✅ 所有依赖安装完成，请重新运行程序")
    sys.exit(0)

# Import the heavy dependencies only after check_dependencies() has verified
# (and optionally installed) them, so missing packages fail with guidance
# instead of a raw ImportError.
check_dependencies()
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, classification_report
import xgboost as xgb
import joblib

def create_sample_data():
    """Load the competition dataset, falling back to the bundled sample data.

    Checks for an already-normalized CSV pair under ``data/raw`` first;
    otherwise reads the raw competition TSV files, normalizes them
    (adds an ``id`` column, fixes column order), and caches them as CSV
    for future runs.

    Returns:
        tuple: (train DataFrame, test DataFrame)
    """
    train_path = Path("data/raw/train.csv")
    test_path = Path("data/raw/test.csv")

    # Fast path: reuse the cached, already-normalized CSVs.
    if train_path.exists() and test_path.exists():
        logger.info("✅ 处理后的数据集已存在，直接加载")
        return pd.read_csv(train_path), pd.read_csv(test_path)

    logger.info("🔄 处理后的数据集不存在，尝试加载原始数据...")

    train_files = [
        "数据集/gaiic_track3_round1_train_20210228.tsv",
        "数据集/gaiic_track3_round2_train_20210407.tsv"
    ]
    test_files = [
        "数据集/gaiic_track3_round1_testA_20210228.tsv",
        "数据集/gaiic_track3_round1_testB_20210317.tsv"
    ]

    def _read_all(paths, columns, kind):
        """Read every existing headerless TSV in *paths* into DataFrames."""
        frames = []
        for file_path in paths:
            if Path(file_path).exists():
                logger.info(f"🔄 加载{kind}数据: {file_path}")
                frames.append(pd.read_csv(file_path, sep='\t', header=None, names=columns))
            else:
                logger.warning(f"⚠️ {kind}数据文件不存在: {file_path}")
        return frames

    train_frames = _read_all(train_files, ['query1', 'query2', 'label'], "训练")
    test_frames = _read_all(test_files, ['query1', 'query2'], "测试")

    # Without any training data the pipeline cannot proceed: use sample data.
    if not train_frames:
        logger.error("❌ 未能加载任何训练数据，将使用示例数据")
        return _create_sample_data()
    train_df = pd.concat(train_frames, ignore_index=True)
    train_df['id'] = range(len(train_df))
    train_df = train_df[['id', 'query1', 'query2', 'label']]
    logger.info(f"✅ 训练数据加载完成: {train_df.shape}")

    if test_frames:
        test_df = pd.concat(test_frames, ignore_index=True)
        test_df['id'] = range(len(test_df))
        test_df = test_df[['id', 'query1', 'query2']]
        logger.info(f"✅ 测试数据加载完成: {test_df.shape}")
    else:
        logger.error("❌ 未能加载任何测试数据，将使用示例数据")
        _, test_df = _create_sample_data()

    # Cache the normalized frames so later runs take the fast path above.
    Path("data/raw").mkdir(parents=True, exist_ok=True)
    train_df.to_csv(train_path, index=False)
    test_df.to_csv(test_path, index=False)

    logger.info("✅ 数据集处理完成并保存")
    return train_df, test_df

def _create_sample_data():
    """Build a tiny in-memory sample dataset (fallback when real data is absent).

    Returns:
        tuple: (train DataFrame, test DataFrame)
    """
    logger.info("🔄 创建小型示例数据集...")

    # Ensure the output directory exists for any later caching step.
    Path("data/raw").mkdir(parents=True, exist_ok=True)

    # Each pair is a (query1, query2) paraphrase; every training pair matches.
    train_pairs = [
        ("播放音乐", "放首歌"),
        ("打电话", "给妈妈打电话"),
        ("查看天气", "今天天气怎么样"),
        ("设置闹钟", "明天早上七点叫我"),
        ("导航回家", "回家的路线"),
        ("开灯", "把灯打开"),
        ("关空调", "把空调关了"),
        ("搜索餐厅", "附近有什么好吃的"),
        ("讲个笑话", "说个笑话"),
        ("发短信", "发消息"),
    ]
    test_pairs = [
        ("播放周杰伦的歌", "来首周杰伦的"),
        ("给爸爸打电话", "呼叫爸爸"),
        ("明天会下雨吗", "明天天气预报"),
        ("设置一个闹钟", "定个闹钟"),
        ("怎么去公司", "到公司的路线"),
    ]

    train_df = pd.DataFrame({
        'id': list(range(len(train_pairs))),
        'query1': [q1 for q1, _ in train_pairs],
        'query2': [q2 for _, q2 in train_pairs],
        'label': [1] * len(train_pairs),
    })
    test_df = pd.DataFrame({
        'id': list(range(len(test_pairs))),
        'query1': [q1 for q1, _ in test_pairs],
        'query2': [q2 for _, q2 in test_pairs],
    })

    logger.info("✅ 示例数据集创建完成")
    return train_df, test_df

def preprocess_text(text):
    """Clean a single query string before vectorization.

    Non-string inputs (None, NaN, numbers) are mapped to the empty string.
    Otherwise leading/trailing whitespace is removed and internal runs of
    whitespace are collapsed to a single space — the original code only
    stripped the ends, despite its comment promising to remove extra spaces.

    Args:
        text (str): raw input text (may be any type from a DataFrame cell).

    Returns:
        str: cleaned text ("" for non-string input).
    """
    # NaN cells from pandas arrive as float, so this also filters them out.
    if not isinstance(text, str):
        return ""

    # str.split() with no argument splits on any whitespace run and drops
    # empty tokens, so join(...) both strips and collapses whitespace.
    return " ".join(text.split())

def extract_features(train_df, test_df):
    """Turn query pairs into sparse TF-IDF feature matrices.

    Adds ``query1_processed``/``query2_processed`` columns to both frames
    (in place), fits a character n-gram TF-IDF vectorizer on all train and
    test text, then builds a ``[q1 | q2 | |q1 - q2|]`` block per dataset.

    Args:
        train_df (DataFrame): training set with ``query1``/``query2``
            (and optionally ``label``).
        test_df (DataFrame): test set with ``query1``/``query2``.

    Returns:
        tuple: (train features, test features, labels or None, fitted vectorizer)
    """
    logger.info("🔄 提取特征...")

    # Normalize raw text into *_processed columns on both frames.
    for frame in (train_df, test_df):
        frame['query1_processed'] = frame['query1'].apply(preprocess_text)
        frame['query2_processed'] = frame['query2'].apply(preprocess_text)

    # Character n-grams suit Chinese text: no word segmentation required.
    vectorizer = TfidfVectorizer(
        analyzer='char_wb',
        ngram_range=(1, 3),
        max_features=5000,
        sublinear_tf=True
    )

    # Fit the vocabulary on every query from both datasets.
    corpus = pd.concat([
        train_df['query1_processed'],
        train_df['query2_processed'],
        test_df['query1_processed'],
        test_df['query2_processed']
    ])
    vectorizer.fit(corpus)

    from scipy.sparse import hstack

    def _pair_features(frame):
        """Stack [q1 | q2 | |q1 - q2|] features for one DataFrame."""
        q1 = vectorizer.transform(frame['query1_processed'])
        q2 = vectorizer.transform(frame['query2_processed'])
        return hstack([q1, q2, abs(q1 - q2)])

    train_features = _pair_features(train_df)
    test_features = _pair_features(test_df)

    labels = None
    if 'label' in train_df.columns:
        labels = train_df['label'].values
    else:
        logger.warning("⚠️ 训练集中没有标签列")

    logger.info(f"✅ 特征提取完成: 训练集特征形状={train_features.shape}, 测试集特征形状={test_features.shape}")

    return train_features, test_features, labels, vectorizer

def train_model(train_features, labels, model_path=None):
    """Fit an XGBoost binary classifier on the extracted features.

    Holds out 20% of the data for validation, trains with early stopping,
    logs validation accuracy/F1, and persists the booster with joblib.

    Args:
        train_features: sparse training feature matrix.
        labels: binary training labels.
        model_path (str): optional save path for the model
            (defaults to models/saved_models/model.pkl).

    Returns:
        model: the trained xgboost Booster.
    """
    logger.info("🔄 开始训练模型...")

    # Fixed random_state keeps the train/validation split reproducible.
    X_train, X_val, y_train, y_val = train_test_split(
        train_features, labels, test_size=0.2, random_state=42
    )

    xgb_params = {
        'objective': 'binary:logistic',
        'eval_metric': 'logloss',
        'eta': 0.1,
        'max_depth': 6,
        'min_child_weight': 1,
        'subsample': 0.8,
        'colsample_bytree': 0.8,
        'seed': 42
    }

    dtrain = xgb.DMatrix(X_train, label=y_train)
    dval = xgb.DMatrix(X_val, label=y_val)

    t0 = time.time()
    model = xgb.train(
        xgb_params,
        dtrain,
        num_boost_round=200,
        evals=[(dtrain, 'train'), (dval, 'val')],
        early_stopping_rounds=20,
        verbose_eval=10
    )
    training_time = time.time() - t0

    # Threshold the validation probabilities at 0.5 for the metrics.
    val_pred = (model.predict(dval) > 0.5).astype(int)
    accuracy = accuracy_score(y_val, val_pred)
    f1 = f1_score(y_val, val_pred)

    logger.info(f"✅ 模型训练完成，耗时: {training_time:.2f}秒")
    logger.info(f"📊 验证集性能: 准确率={accuracy:.4f}, F1分数={f1:.4f}")

    # Resolve the save path and make sure its directory exists.
    if model_path is None:
        model_path = Path("models/saved_models") / "model.pkl"
    else:
        model_path = Path(model_path)
    model_path.parent.mkdir(parents=True, exist_ok=True)

    joblib.dump(model, model_path)
    logger.info(f"💾 模型已保存: {model_path}")

    return model

def predict(model, test_features, vectorizer=None, output_path=None):
    """Score the test features and write a submission CSV.

    Args:
        model: trained xgboost Booster.
        test_features: sparse test feature matrix.
        vectorizer: optional fitted vectorizer; when given, it is persisted
            next to the model so prediction-only runs can reuse it.
        output_path (str): optional submission path
            (defaults to data/submissions/submission.csv).

    Returns:
        DataFrame: the submission frame (columns: id, label).
    """
    logger.info("🔄 开始预测...")

    probabilities = model.predict(xgb.DMatrix(test_features))
    # Hard 0/1 decision at the 0.5 probability threshold.
    hard_labels = (probabilities > 0.5).astype(int)

    submission_df = pd.DataFrame({
        'id': range(len(probabilities)),
        'label': hard_labels
    })

    # Resolve the output path and make sure its directory exists.
    if output_path is None:
        output_path = Path("data/submissions") / "submission.csv"
    else:
        output_path = Path(output_path)
    output_path.parent.mkdir(parents=True, exist_ok=True)

    submission_df.to_csv(output_path, index=False)
    logger.info(f"💾 预测结果已保存: {output_path}")

    if vectorizer is not None:
        vectorizer_path = Path("models/saved_models/vectorizer.pkl")
        vectorizer_path.parent.mkdir(parents=True, exist_ok=True)
        joblib.dump(vectorizer, vectorizer_path)
        logger.info(f"💾 向量化器已保存: {vectorizer_path}")

    return submission_df

def load_model_and_predict(test_df, model_path=None, vectorizer_path=None, output_path=None):
    """Reload a saved model and vectorizer, then predict on a test set.

    Args:
        test_df (DataFrame): test set with ``query1``/``query2`` columns.
        model_path (str): saved model path
            (defaults to models/saved_models/model.pkl).
        vectorizer_path (str): saved vectorizer path
            (defaults to models/saved_models/vectorizer.pkl).
        output_path (str): submission output path.

    Returns:
        DataFrame: prediction results, or None when artifacts are missing.
    """
    if model_path is None:
        model_path = Path("models/saved_models/model.pkl")
    else:
        model_path = Path(model_path)

    if vectorizer_path is None:
        vectorizer_path = Path("models/saved_models/vectorizer.pkl")
    else:
        vectorizer_path = Path(vectorizer_path)

    # Both artifacts are required; bail out early if either is absent.
    if not (model_path.exists() and vectorizer_path.exists()):
        logger.error(f"❌ 模型或向量化器不存在: {model_path}, {vectorizer_path}")
        return None

    logger.info(f"🔄 加载模型: {model_path}")
    model = joblib.load(model_path)

    logger.info(f"🔄 加载向量化器: {vectorizer_path}")
    vectorizer = joblib.load(vectorizer_path)

    # Recreate the processed-text columns used at training time.
    for column in ('query1', 'query2'):
        test_df[f'{column}_processed'] = test_df[column].apply(preprocess_text)

    from scipy.sparse import hstack

    q1 = vectorizer.transform(test_df['query1_processed'])
    q2 = vectorizer.transform(test_df['query2_processed'])
    # Same [q1 | q2 | |q1 - q2|] layout produced by extract_features().
    test_features = hstack([q1, q2, abs(q1 - q2)])

    return predict(model, test_features, None, output_path)

def main():
    """Command-line entry point: parse args, load data, train and/or predict."""
    parser = argparse.ArgumentParser(description="语音助手对话短文本语义匹配")
    parser.add_argument('--train', action='store_true', help='训练模型')
    parser.add_argument('--predict', action='store_true', help='预测测试集')
    parser.add_argument('--train-file', type=str, help='训练文件路径')
    parser.add_argument('--test-file', type=str, help='测试文件路径')
    parser.add_argument('--model-path', type=str, help='模型保存/加载路径')
    parser.add_argument('--output-path', type=str, help='预测结果保存路径')
    parser.add_argument('--use-competition-data', action='store_true', help='使用比赛数据')
    args = parser.parse_args()

    banner = "=" * 60
    logger.info(banner)
    logger.info("🤖对话短文本语义匹配 ")
    logger.info(banner)

    # With neither flag given, run the full train + predict pipeline.
    if not (args.train or args.predict):
        args.train = True
        args.predict = True

    # Pick the data source: competition data, explicit files, or fallback.
    if args.use_competition_data:
        # create_sample_data() loads the real competition data when available.
        train_df, test_df = create_sample_data()
    elif args.train_file and Path(args.train_file).exists():
        train_df = pd.read_csv(args.train_file)
        logger.info(f"✅ 从 {args.train_file} 加载训练集: {train_df.shape}")

        if args.test_file and Path(args.test_file).exists():
            test_df = pd.read_csv(args.test_file)
            logger.info(f"✅ 从 {args.test_file} 加载测试集: {test_df.shape}")
        else:
            _, test_df = _create_sample_data()
    else:
        train_df, test_df = create_sample_data()

    if args.train:
        train_features, test_features, labels, vectorizer = extract_features(train_df, test_df)
        model = train_model(train_features, labels, args.model_path)

        # Reuse the in-memory model and features when prediction is also requested.
        if args.predict:
            predict(model, test_features, vectorizer, args.output_path)
    elif args.predict:
        # Prediction-only mode: reload persisted artifacts from disk.
        load_model_and_predict(test_df, args.model_path, None, args.output_path)

    logger.info("✅ 程序执行完成!")

if __name__ == "__main__":
    main()
