#!/usr/bin/env python3
"""
离线版本的BERT意图识别模型训练脚本
当无法连接到Hugging Face时使用本地简单模型
"""

import argparse
import json
import sys
from pathlib import Path

import joblib
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier

# Add the project root directory to sys.path so that the `src.*` imports
# below resolve when this script is executed directly (not as a module).
project_root = Path(__file__).parent.parent
sys.path.append(str(project_root))

from src.data.data_loader import IntentDataLoader
from src.utils.logger import setup_logger


class SimpleTfidfIntentClassifier:
    """Simple TF-IDF + logistic-regression intent classifier.

    Offline fallback for intent recognition, used when the BERT model on
    Hugging Face cannot be downloaded.
    """

    def __init__(self, max_features=10000, max_df=0.95, min_df=2):
        """
        Initialize the classifier.

        Args:
            max_features: maximum vocabulary size of the TF-IDF vectorizer
            max_df: ignore terms whose document frequency is above this ratio
            min_df: ignore terms appearing in fewer than this many documents
        """
        self.vectorizer = TfidfVectorizer(
            max_features=max_features,
            max_df=max_df,
            min_df=min_df,
            stop_words=None,  # Chinese text: the English stop-word list does not apply
            ngram_range=(1, 2)  # use both unigrams and bigrams
        )
        # One-vs-rest wrapper preserves the behavior of the former
        # LogisticRegression(multi_class='ovr'); the multi_class argument is
        # deprecated since scikit-learn 1.5 and removed in 1.7, so passing it
        # crashes on current scikit-learn versions.
        self.classifier = OneVsRestClassifier(
            LogisticRegression(random_state=42, max_iter=1000)
        )
        self.label_mapping = {}    # label name -> integer class id
        self.reverse_mapping = {}  # integer class id -> label name
        self.trained = False

    def fit(self, texts, labels):
        """Fit vectorizer and classifier on parallel lists of texts and labels.

        Args:
            texts: list of raw text samples
            labels: list of intent labels, aligned with *texts*

        Returns:
            self, to allow chaining.
        """
        # Vectorize the training texts (fits the vocabulary as well).
        X = self.vectorizer.fit_transform(texts)

        # Encode labels as contiguous integer ids; sorting makes the
        # assignment deterministic across runs.
        unique_labels = sorted(set(labels))
        self.label_mapping = {label: i for i, label in enumerate(unique_labels)}
        self.reverse_mapping = {i: label for label, i in self.label_mapping.items()}

        y = [self.label_mapping[label] for label in labels]

        # Train the classifier.
        self.classifier.fit(X, y)
        self.trained = True

        return self

    def predict(self, texts):
        """Predict intent labels for a list of texts.

        Raises:
            ValueError: if called before fit() or load().
        """
        if not self.trained:
            raise ValueError("模型尚未训练")

        X = self.vectorizer.transform(texts)
        y_pred = self.classifier.predict(X)

        # Map integer class ids back to label names.
        return [self.reverse_mapping[pred] for pred in y_pred]

    def predict_proba(self, texts):
        """Return the class-probability matrix for a list of texts.

        Raises:
            ValueError: if called before fit() or load().
        """
        if not self.trained:
            raise ValueError("模型尚未训练")

        X = self.vectorizer.transform(texts)
        return self.classifier.predict_proba(X)

    def save(self, model_path, mappings_path):
        """Persist the fitted model (joblib) and label mappings (JSON).

        Args:
            model_path: destination for the joblib model bundle
            mappings_path: destination for the human-readable mapping JSON

        Raises:
            ValueError: if called before fit() or load().
        """
        if not self.trained:
            raise ValueError("模型尚未训练")

        # Bundle everything load() needs into a single joblib file.
        model_data = {
            'vectorizer': self.vectorizer,
            'classifier': self.classifier,
            'label_mapping': self.label_mapping,
            'reverse_mapping': self.reverse_mapping
        }
        joblib.dump(model_data, model_path)

        # Human-readable mapping metadata. NOTE: JSON serialization turns the
        # integer keys of reverse_mapping into strings; the authoritative
        # mappings live in the joblib bundle.
        mappings = {
            'label_mapping': self.label_mapping,
            'reverse_mapping': self.reverse_mapping,
            'num_classes': len(self.label_mapping),
            'model_type': 'TfidfLogisticRegression'
        }

        with open(mappings_path, 'w', encoding='utf-8') as f:
            json.dump(mappings, f, ensure_ascii=False, indent=2)

    @classmethod
    def load(cls, model_path):
        """Load a previously saved model bundle and return a ready-to-use instance."""
        model_data = joblib.load(model_path)

        instance = cls()
        instance.vectorizer = model_data['vectorizer']
        instance.classifier = model_data['classifier']
        instance.label_mapping = model_data['label_mapping']
        instance.reverse_mapping = model_data['reverse_mapping']
        instance.trained = True

        return instance


def parse_arguments():
    """Parse command-line arguments for the offline training script.

    Returns:
        argparse.Namespace with attributes: data, output_dir, test_size,
        val_size.

    Exits with an argparse usage error when a split ratio is outside (0, 1)
    or the two ratios together leave no data for the training set.
    """
    parser = argparse.ArgumentParser(description='离线意图识别模型训练')

    parser.add_argument(
        '--data',
        type=str,
        required=True,
        help='训练数据文件路径 (CSV格式)'
    )

    parser.add_argument(
        '--output-dir',
        type=str,
        default='./models/offline',
        help='模型输出目录'
    )

    parser.add_argument(
        '--test-size',
        type=float,
        default=0.2,
        help='测试集比例'
    )

    parser.add_argument(
        '--val-size',
        type=float,
        default=0.1,
        help='验证集比例'
    )

    args = parser.parse_args()

    # Validate the split ratios early: main() computes
    # val_size / (1 - test_size), which would divide by zero (test_size=1)
    # or yield an invalid split fraction without these checks.
    if not 0 < args.test_size < 1:
        parser.error(f'--test-size 必须在 (0, 1) 区间内: {args.test_size}')
    if not 0 < args.val_size < 1:
        parser.error(f'--val-size 必须在 (0, 1) 区间内: {args.val_size}')
    if args.test_size + args.val_size >= 1:
        parser.error('--test-size 与 --val-size 之和必须小于 1')

    return args


def main():
    """Entry point: load data, split, train, evaluate and persist the model."""
    args = parse_arguments()

    # Set up logging.
    logger = setup_logger('offline_train', log_level='INFO')

    try:
        logger.info("开始离线意图识别模型训练")

        # 1. Load the data.
        logger.info(f"从 {args.data} 加载数据")
        data_loader = IntentDataLoader()
        df = data_loader.load_from_csv(args.data)

        # Validate the data.
        if not data_loader.validate_data(df):
            raise ValueError("数据验证失败")

        # Collect dataset statistics.
        stats = data_loader.get_data_statistics(df)
        logger.info(f"数据统计: {stats}")

        # 2. Split the data; stratify so class proportions stay stable.
        logger.info("分割数据集")
        train_df, test_df = train_test_split(
            df,
            test_size=args.test_size,
            random_state=42,
            stratify=df['intent']
        )

        # The validation fraction is rescaled because it is carved out of the
        # remaining (1 - test_size) portion of the data.
        train_df, val_df = train_test_split(
            train_df,
            test_size=args.val_size/(1-args.test_size),
            random_state=42,
            stratify=train_df['intent']
        )

        logger.info(f"训练集: {len(train_df)}, 验证集: {len(val_df)}, 测试集: {len(test_df)}")

        # 3. Create and train the model.
        logger.info("创建TF-IDF分类器")
        classifier = SimpleTfidfIntentClassifier()

        logger.info("训练模型")
        classifier.fit(train_df['text'].tolist(), train_df['intent'].tolist())

        # 4. Evaluate the model.
        logger.info("评估模型")

        # Validation-set accuracy.
        val_pred = classifier.predict(val_df['text'].tolist())
        val_accuracy = accuracy_score(val_df['intent'].tolist(), val_pred)
        logger.info(f"验证集准确率: {val_accuracy:.4f}")

        # Test-set accuracy.
        test_pred = classifier.predict(test_df['text'].tolist())
        test_accuracy = accuracy_score(test_df['intent'].tolist(), test_pred)
        logger.info(f"测试集准确率: {test_accuracy:.4f}")

        # Detailed per-class report.
        report = classification_report(test_df['intent'].tolist(), test_pred, output_dict=True)
        logger.info("分类报告:")
        for class_name, metrics in report.items():
            # Skip scalar entries of the report dict such as 'accuracy'.
            if isinstance(metrics, dict) and 'precision' in metrics:
                logger.info(f"  {class_name}: P={metrics['precision']:.3f}, R={metrics['recall']:.3f}, F1={metrics['f1-score']:.3f}")

        # 5. Save the model.
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        model_path = output_dir / "model.joblib"
        mappings_path = output_dir / "mappings.json"

        logger.info(f"保存模型到 {model_path}")
        classifier.save(str(model_path), str(mappings_path))

        # Save the evaluation results.
        def convert_numpy_types(obj):
            """Recursively convert numpy scalars/arrays to native Python types."""
            if isinstance(obj, np.integer):
                return int(obj)
            elif isinstance(obj, np.floating):
                return float(obj)
            elif isinstance(obj, np.ndarray):
                return obj.tolist()
            elif isinstance(obj, dict):
                return {key: convert_numpy_types(value) for key, value in obj.items()}
            elif isinstance(obj, list):
                return [convert_numpy_types(item) for item in obj]
            else:
                return obj

        # Convert the WHOLE results payload, not just `stats`: the
        # classification report contains np.int64 support counts (np.int64 is
        # not a Python int subclass), which json.dump rejects with TypeError.
        results = convert_numpy_types({
            'validation_accuracy': val_accuracy,
            'test_accuracy': test_accuracy,
            'classification_report': report,
            'data_stats': stats
        })

        results_path = output_dir / "results.json"
        with open(results_path, 'w', encoding='utf-8') as f:
            json.dump(results, f, ensure_ascii=False, indent=2)

        logger.info("🎉 离线训练完成！")
        logger.info(f"模型已保存到: {output_dir}")
        logger.info(f"测试集准确率: {test_accuracy:.4f}")

    except Exception as e:
        logger.error(f"训练过程中发生错误: {e}")
        import traceback
        logger.error(f"错误详情:\n{traceback.format_exc()}")
        sys.exit(1)


# Standard script entry-point guard: run training only when executed directly.
if __name__ == "__main__":
    main()
