#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
语音助手：对话短文本语义匹配 - 主程序

Author: BOSS (牛马)
Date: 2024-06-19
"""

import sys
import pandas as pd
import numpy as np
from pathlib import Path

# Verify that all required third-party dependencies are importable
try:
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score, f1_score
    import xgboost as xgb
    import joblib
    print("✅ 所有依赖可用")
except ImportError as e:
    print(f"❌ 缺少依赖: {e}")
    print("请安装: pip install pandas scikit-learn xgboost")
    sys.exit(1)


def create_sample_data(seed: int = 42):
    """Create a small synthetic dataset of query pairs for semantic matching.

    Builds 20 similar (label=1) and 20 dissimilar (label=0) Chinese
    voice-assistant query pairs, shuffles them reproducibly, splits 80/20
    into train/test, and writes both CSVs under ``data/raw/``.

    Args:
        seed: Random seed for the shuffle so repeated runs produce the same
            split (matches the ``random_state=42`` used during training).

    Returns:
        Tuple ``(train_path, test_path)`` of ``pathlib.Path`` objects.
    """
    print("🔄 创建示例数据...")

    # Voice-assistant query pairs with the same intent (label = 1).
    similar_pairs = [
        ("播放音乐", "我想听音乐", 1), ("打开微信", "启动微信", 1),
        ("查看天气", "今天天气怎么样", 1), ("设置闹钟", "定个闹钟", 1),
        ("拨打电话", "打电话给", 1), ("发送短信", "发个短信", 1),
        ("导航到家", "回家路线", 1), ("搜索餐厅", "找个饭店", 1),
        ("关闭应用", "退出程序", 1), ("调节音量", "声音调大", 1),
        ("拍照", "照相", 1), ("录音", "开始录音", 1),
        ("查看日历", "看看日程", 1), ("设置提醒", "提醒我", 1),
        ("翻译", "帮我翻译", 1), ("计算", "算一下", 1),
        ("查询股票", "看股价", 1), ("订外卖", "点餐", 1),
        ("预约", "帮我预约", 1), ("查看新闻", "看新闻", 1),
    ]

    # Query pairs with different intents (label = 0).
    different_pairs = [
        ("播放音乐", "查看天气", 0), ("打开微信", "设置闹钟", 0),
        ("拨打电话", "拍照", 0), ("发送短信", "导航", 0),
        ("搜索餐厅", "录音", 0), ("关闭应用", "翻译", 0),
        ("调节音量", "计算", 0), ("查看日历", "订外卖", 0),
        ("设置提醒", "查询股票", 0), ("预约", "看新闻", 0),
        ("听音乐", "关灯", 0), ("发微信", "看电影", 0),
        ("查天气", "玩游戏", 0), ("打电话", "学习", 0),
        ("导航", "睡觉", 0), ("搜索", "吃饭", 0),
        ("拍照", "工作", 0), ("录音", "运动", 0),
        ("提醒", "购物", 0), ("翻译", "旅游", 0),
    ]

    # Merge and shuffle deterministically: an unseeded shuffle would make
    # the train/test split differ between runs, defeating reproducibility.
    all_data = similar_pairs + different_pairs
    rng = np.random.default_rng(seed)
    all_data = [all_data[i] for i in rng.permutation(len(all_data))]

    # Build the DataFrame with a leading integer id column.
    df = pd.DataFrame(all_data, columns=['query1', 'query2', 'label'])
    df.insert(0, 'id', range(len(df)))

    # 80/20 split; the test split mimics unlabeled competition data
    # (labels are dropped) and its ids are renumbered from 0.
    train_size = int(len(df) * 0.8)
    train_df = df.iloc[:train_size].copy()
    test_df = df.iloc[train_size:][['id', 'query1', 'query2']].copy()
    test_df['id'] = range(len(test_df))

    # Persist both splits under data/raw/.
    data_dir = Path("data/raw")
    data_dir.mkdir(parents=True, exist_ok=True)

    train_path = data_dir / "train.csv"
    test_path = data_dir / "test.csv"

    train_df.to_csv(train_path, index=False)
    test_df.to_csv(test_path, index=False)

    print(f"✅ 数据创建完成: 训练集 {len(train_df)} 样本, 测试集 {len(test_df)} 样本")
    return train_path, test_path


def train_model(train_path):
    """Train a TF-IDF + XGBoost similarity classifier from a training CSV.

    Reads query pairs from *train_path* (columns ``query1``, ``query2``,
    ``label``), fits a TF-IDF vectorizer on the concatenated pair texts,
    trains an XGBoost classifier, reports validation accuracy/F1, and
    saves both artifacts under ``models/saved_models/``.

    Args:
        train_path: Path to the training CSV.

    Returns:
        Tuple ``(model, vectorizer)`` of the fitted estimator objects.
    """
    print("🔄 训练XGBoost模型...")

    # Load the labeled training data.
    train_df = pd.read_csv(train_path)

    # Join each pair into one text. Vectorized string concatenation replaces
    # the original per-row iterrows loop, which is the slow pandas anti-idiom.
    texts = (train_df['query1'].astype(str) + " " + train_df['query2'].astype(str)).tolist()
    labels = train_df['label'].values

    # Unigram + bigram TF-IDF features, capped at 1000 dimensions.
    vectorizer = TfidfVectorizer(max_features=1000, ngram_range=(1, 2))
    features = vectorizer.fit_transform(texts)

    # Hold out 20% for validation. Stratify so both classes appear in the
    # small validation split — otherwise f1_score can be ill-defined.
    X_train, X_val, y_train, y_val = train_test_split(
        features, labels, test_size=0.2, random_state=42, stratify=labels
    )

    # Fit the classifier with a fixed seed for reproducibility.
    model = xgb.XGBClassifier(n_estimators=100, max_depth=6, learning_rate=0.1, random_state=42)
    model.fit(X_train, y_train)

    # Evaluate on the held-out validation split.
    val_pred = model.predict(X_val)
    accuracy = accuracy_score(y_val, val_pred)
    f1 = f1_score(y_val, val_pred)

    print(f"✅ 模型训练完成! 准确率: {accuracy:.4f}, F1: {f1:.4f}")

    # Persist the model and the vectorizer so prediction can be rerun later.
    models_dir = Path("models/saved_models")
    models_dir.mkdir(parents=True, exist_ok=True)

    joblib.dump(model, models_dir / "model.pkl")
    joblib.dump(vectorizer, models_dir / "vectorizer.pkl")

    return model, vectorizer


def predict(test_path, model, vectorizer):
    """Predict pair similarity for a test CSV and write a submission file.

    Args:
        test_path: Path to a CSV with columns ``id``, ``query1``, ``query2``.
        model: Fitted classifier exposing ``predict_proba``.
        vectorizer: Fitted text vectorizer exposing ``transform``.

    Returns:
        1-D numpy array of positive-class probabilities, one per test row.
        Side effect: writes ``data/submissions/submission.csv`` with the
        thresholded 0/1 labels.
    """
    print("🔄 预测测试数据...")

    # Load the unlabeled test data.
    test_df = pd.read_csv(test_path)

    # Join each pair into one text. Vectorized concatenation replaces the
    # original per-row iterrows loop (slow pandas anti-idiom).
    test_texts = (test_df['query1'].astype(str) + " " + test_df['query2'].astype(str)).tolist()
    test_features = vectorizer.transform(test_texts)

    # Positive-class probability, thresholded at 0.5 for the hard label.
    predictions = model.predict_proba(test_features)[:, 1]
    binary_predictions = (predictions >= 0.5).astype(int)

    # Write the submission CSV (id, label).
    submission_dir = Path("data/submissions")
    submission_dir.mkdir(parents=True, exist_ok=True)

    result_df = pd.DataFrame({
        'id': test_df['id'],
        'label': binary_predictions
    })

    submission_path = submission_dir / "submission.csv"
    result_df.to_csv(submission_path, index=False)

    print(f"✅ 预测完成! 结果保存至: {submission_path}")

    # Show up to five example predictions for a quick sanity check.
    print("\n📊 预测结果示例:")
    for i in range(min(5, len(test_df))):
        row = test_df.iloc[i]
        prob = predictions[i]
        label = "相似" if binary_predictions[i] == 1 else "不相似"
        print(f"'{row['query1']}' vs '{row['query2']}': {prob:.3f} ({label})")

    return predictions


def main():
    """主函数"""
    print("🤖 语音助手：对话短文本语义匹配")
    print("Author: BOSS (牛马)")
    print("=" * 50)

    try:
        # 1. 创建数据
        train_path, test_path = create_sample_data()

        # 2. 训练模型
        model, vectorizer = train_model(train_path)

        # 3. 预测
        predictions = predict(test_path, model, vectorizer)

        print("\n🎉 完整流程执行成功!")
        print("📁 生成的文件:")
        print("   - data/raw/train.csv (训练数据)")
        print("   - data/raw/test.csv (测试数据)")
        print("   - models/saved_models/model.pkl (训练好的模型)")
        print("   - data/submissions/submission.csv (预测结果)")

    except Exception as e:
        print(f"❌ 执行失败: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()
