#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
模型预测工具
用于读取已保存的模型参数进行情感预测
"""

import os
import json
import torch
import logging
import argparse
import pandas as pd
import numpy as np
from tqdm import tqdm
from transformers import BertTokenizer, BertForSequenceClassification, RobertaTokenizer, \
    RobertaForSequenceClassification, AutoModelForSequenceClassification, AutoTokenizer

# Configure module-wide logging: timestamped, INFO-level messages.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO
)
logger = logging.getLogger(__name__)


class ModelPredictor:
    """Loads a saved sentiment model directory and runs predictions with it."""

    def __init__(self, model_path, device=None):
        """
        Initialize the predictor.

        Args:
            model_path: directory containing the saved model artifacts.
            device: torch device string (e.g. "cuda"/"cpu"); auto-detected
                when None.
        """
        self.model_path = model_path

        # Explicit device override wins; otherwise prefer CUDA when available.
        if device:
            self.device = torch.device(device)
        else:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        logger.info(f"使用设备: {self.device}")

        # Training-time configuration (model type, label map, etc.).
        self.config = self._load_config()

        # Model weights plus the matching tokenizer.
        self.model, self.tokenizer = self._load_model()
        self.model.to(self.device)

    def _load_config(self):
        """
        Load config.json from the model directory.

        Returns:
            dict with the model configuration.

        Raises:
            FileNotFoundError: when config.json is missing.
        """
        config_path = os.path.join(self.model_path, 'config.json')

        if not os.path.exists(config_path):
            raise FileNotFoundError(f"模型配置文件不存在: {config_path}")

        # Explicit encoding so the file parses identically on every platform.
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)

        logger.info(f"已加载模型配置: {config}")
        return config

    def _load_transformer(self, model_cls, tokenizer_cls, save_format):
        """
        Load a transformers model/tokenizer pair from self.model_path.

        Falls back to ``from_flax=True`` when only Flax weights are present.
        """
        if save_format == 'flax':
            logger.info("使用from_flax=True参数加载Flax格式模型")
            model = model_cls.from_pretrained(self.model_path, from_flax=True)
            return model, tokenizer_cls.from_pretrained(self.model_path)

        try:
            model = model_cls.from_pretrained(self.model_path)
            return model, tokenizer_cls.from_pretrained(self.model_path)
        except OSError as e:
            # transformers raises OSError with this phrasing when the directory
            # holds Flax weights but PyTorch weights were requested.
            if "but there is a file for Flax weights" in str(e):
                logger.info("检测到Flax权重文件，尝试使用from_flax=True加载")
                model = model_cls.from_pretrained(self.model_path, from_flax=True)
                return model, tokenizer_cls.from_pretrained(self.model_path)
            raise

    def _load_model(self):
        """
        Load the model and tokenizer according to config['model_type'].

        Returns:
            (model, tokenizer) tuple.

        Raises:
            ValueError: for an unsupported model type.
        """
        model_type = self.config.get('model_type', 'bert').lower()
        save_format = self.config.get('save_format', 'pytorch')

        # Log the directory contents to aid debugging failed loads.
        if os.path.exists(self.model_path):
            files = os.listdir(self.model_path)
            logger.info(f"模型目录中的文件: {files}")
        else:
            logger.warning(f"模型路径不存在: {self.model_path}")

        try:
            if model_type in ['bert', 'finbert']:
                logger.info(f"正在加载BERT模型: {self.model_path}, 保存格式: {save_format}")
                model, tokenizer = self._load_transformer(
                    BertForSequenceClassification, BertTokenizer, save_format)

            elif model_type == 'roberta':
                logger.info(f"正在加载RoBERTa模型: {self.model_path}, 保存格式: {save_format}")
                model, tokenizer = self._load_transformer(
                    AutoModelForSequenceClassification, AutoTokenizer, save_format)

            elif model_type == 'lstm':
                logger.info(f"正在加载LSTM模型: {self.model_path}")
                model_file = os.path.join(self.model_path, 'model_full.pt')
                if os.path.exists(model_file):
                    logger.info(f"找到LSTM模型文件: {model_file}")
                else:
                    logger.warning(f"LSTM模型文件不存在: {model_file}")

                # NOTE(review): torch.load unpickles arbitrary objects — only
                # load model files from trusted sources.
                model = torch.load(model_file, map_location=self.device)
                # LSTM checkpoints ship no tokenizer; use a generic Chinese one.
                tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")

            else:
                raise ValueError(f"不支持的模型类型: {model_type}")

            logger.info(f"模型加载成功: {model_type}")
            return model, tokenizer

        except Exception as e:
            logger.error(f"模型加载失败: {str(e)}")
            raise

    def predict(self, texts, batch_size=8):
        """
        Predict sentiment for one text or a list of texts.

        Args:
            texts: a string, or a list of strings, to classify.
            batch_size: number of texts scored per forward pass.

        Returns:
            a result dict for a single string input, else a list of dicts.
        """
        # Normalize a single string to a one-element list.
        if isinstance(texts, str):
            texts = [texts]
            single_input = True
        else:
            single_input = False

        self.model.eval()

        results = []
        for i in range(0, len(texts), batch_size):
            results.extend(self._predict_batch(texts[i:i + batch_size]))

        # Mirror the input shape: dict in, dict out.
        return results[0] if single_input else results

    def _predict_batch(self, texts):
        """
        Score one batch of texts.

        Args:
            texts: list of strings.

        Returns:
            list of dicts with 'text', 'predicted_sentiment', 'confidence'
            and one '<label>_prob' entry per label.
        """
        max_seq_length = self.config.get('max_seq_length', 128)

        inputs = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            max_length=max_seq_length,
            return_tensors="pt"
        )

        inputs = {k: v.to(self.device) for k, v in inputs.items()}

        with torch.no_grad():
            outputs = self.model(**inputs)
            # HF models expose .logits; a raw module may return a tuple.
            logits = outputs.logits if hasattr(outputs, 'logits') else outputs[0]
            probabilities = torch.nn.functional.softmax(logits, dim=1)

        probs_numpy = probabilities.cpu().numpy()

        label_map = self.config.get('label_map', ['负面', '中性', '正面'])

        results = []
        for i, text in enumerate(texts):
            probs = probs_numpy[i]
            sentiment_idx = int(np.argmax(probs))

            result = {
                'text': text,
                'predicted_sentiment': label_map[sentiment_idx],
                # Native float for JSON serializability.
                'confidence': float(probs[sentiment_idx]),
            }

            for j, label in enumerate(label_map):
                result[f'{label}_prob'] = float(probs[j])

            results.append(result)

        return results

    def predict_file(self, input_file, output_file, text_column, batch_size=16):
        """
        Predict sentiment for every row of a CSV file and save the result.

        Args:
            input_file: input CSV path.
            output_file: output CSV path.
            text_column: name of the column holding the text.
            batch_size: batch size for prediction.

        Returns:
            the annotated DataFrame.
        """
        logger.info(f"开始处理文件: {input_file}")

        df = pd.read_csv(input_file)
        # Missing cells become empty strings so the tokenizer never sees NaN.
        texts = df[text_column].fillna("").tolist()
        total = len(texts)

        logger.info(f"共 {total} 条文本需要预测")

        results = []
        for i in tqdm(range(0, total, batch_size), desc="预测进度"):
            # Pass batch_size through so predict() does not re-split each
            # slice with its smaller default of 8 (the original double-batched).
            results.extend(self.predict(texts[i:i + batch_size], batch_size=batch_size))

        self._process_results(df, results)

        df.to_csv(output_file, index=False)
        logger.info(f"预测结果已保存到: {output_file}")

        return df

    def _process_results(self, df, results):
        """
        Attach prediction columns to df in place.

        ``results`` must be aligned with df rows (same order and length).
        """
        df['sentiment'] = [r['predicted_sentiment'] for r in results]
        df['confidence'] = [r['confidence'] for r in results]

        # One probability column per label, mirroring ensemble_predict_file.
        label_map = self.config.get('label_map', ['负面', '中性', '正面'])
        for label in label_map:
            df[f'{label}_prob'] = [r.get(f'{label}_prob', 0.0) for r in results]


def load_ensemble_model(ensemble_path):
    """
    Load an ensemble of models described by ensemble_config.json.

    Args:
        ensemble_path: directory containing ensemble_config.json.

    Returns:
        predictors: list of successfully loaded ModelPredictor instances.
        weights: list of per-model weights (uniform when not configured).

    Raises:
        FileNotFoundError: when the ensemble config file is missing.
        RuntimeError: when no model could be loaded.
    """
    config_path = os.path.join(ensemble_path, 'ensemble_config.json')

    if not os.path.exists(config_path):
        raise FileNotFoundError(f"集成模型配置文件不存在: {config_path}")

    with open(config_path, 'r', encoding='utf-8') as f:
        config = json.load(f)

    model_paths = config['model_paths']

    # Default to uniform weights; guard the empty case so an empty
    # model list cannot raise ZeroDivisionError here.
    if 'weights' in config:
        weights = config['weights']
    elif model_paths:
        weights = [1.0 / len(model_paths)] * len(model_paths)
    else:
        weights = []

    predictors = []
    for model_name, path in model_paths.items():
        try:
            predictors.append(ModelPredictor(path))
            logger.info(f"已加载模型: {model_name}")
        except Exception as e:
            logger.error(f"加载模型 {model_name} 失败: {str(e)}")

    if not predictors:
        # Failing loudly beats silently producing all-zero probabilities
        # downstream in ensemble_predict.
        raise RuntimeError("未能加载任何集成子模型")

    # NOTE(review): if only some models load, `weights` may no longer align
    # with `predictors` — confirm the intended behavior with callers.
    return predictors, weights


def ensemble_predict(predictors, texts, weights=None, batch_size=8):
    """
    Combine several models' predictions by a weighted probability sum.

    Args:
        predictors: list of predictor objects exposing ``predict``.
        texts: list of texts to classify.
        weights: per-model weights; uniform weights are used when None.
        batch_size: batch size forwarded to each predictor.

    Returns:
        list of result dicts with the text, the winning sentiment, its
        confidence, and one probability entry per label.
    """
    # Fall back to equal weighting when none is supplied.
    if weights is None:
        weights = [1.0 / len(predictors)] * len(predictors)

    # Collect every model's per-text predictions up front.
    all_predictions = [p.predict(texts, batch_size=batch_size) for p in predictors]

    # Resolve the label set, preferring the first predictor's config.
    label_map = ['负面', '中性', '正面']
    if predictors and hasattr(predictors[0], 'config'):
        label_map = predictors[0].config.get('label_map', label_map)

    merged = []
    for idx, text in enumerate(texts):
        probs = np.zeros(len(label_map))

        # Weighted accumulation of each model's class probabilities.
        for model_idx, predictions in enumerate(all_predictions):
            pred = predictions[idx] if isinstance(predictions, list) else predictions
            for j, label in enumerate(label_map):
                key = f'{label}_prob'
                if key in pred:
                    probs[j] += weights[model_idx] * pred[key]

        # The highest combined probability wins.
        best = np.argmax(probs)
        entry = {
            'text': text,
            'predicted_sentiment': label_map[best],
            'confidence': float(probs[best])
        }
        for j, label in enumerate(label_map):
            entry[f'{label}_prob'] = float(probs[j])

        merged.append(entry)

    return merged


def ensemble_predict_file(predictors, weights, input_file, output_file, text_column, batch_size=16):
    """
    Predict every row of a CSV file with the ensemble and save the result.

    Args:
        predictors: list of predictors.
        weights: list of per-model weights.
        input_file: input CSV path.
        output_file: output CSV path.
        text_column: name of the column holding the text.
        batch_size: batch size for prediction.

    Returns:
        the annotated DataFrame.
    """
    logger.info(f"开始处理文件: {input_file}")

    df = pd.read_csv(input_file)

    # Missing cells become empty strings so prediction never sees NaN.
    texts = df[text_column].fillna("").tolist()
    total = len(texts)
    logger.info(f"使用集成模型预测 {total} 条文本")

    # Chunked prediction with a progress bar.
    results = []
    for start in tqdm(range(0, total, batch_size), desc="预测进度"):
        chunk = texts[start:start + batch_size]
        results.extend(ensemble_predict(predictors, chunk, weights, batch_size))

    # Write predictions back into the DataFrame, row-aligned.
    df['predicted_sentiment'] = [r['predicted_sentiment'] for r in results]
    df['confidence'] = [r['confidence'] for r in results]

    # Resolve the label set, preferring the first predictor's config.
    label_map = ['负面', '中性', '正面']
    if predictors and hasattr(predictors[0], 'config'):
        label_map = predictors[0].config.get('label_map', label_map)

    # One probability column per label.
    for label in label_map:
        df[f'{label}_prob'] = [r.get(f'{label}_prob', 0.0) for r in results]

    df.to_csv(output_file, index=False)
    logger.info(f"预测结果已保存到: {output_file}")

    return df


def main():
    """CLI entry point: parse arguments and dispatch to the chosen prediction mode."""
    parser = argparse.ArgumentParser(description='模型预测工具')

    # Three sub-commands: single / ensemble / text.
    subparsers = parser.add_subparsers(dest='command', help='子命令')

    # 'single': predict a CSV file with one model.
    single_parser = subparsers.add_parser('single', help='使用单个模型预测')
    single_parser.add_argument('--model_path', type=str, required=True, help='模型路径')
    single_parser.add_argument('--input', type=str, required=True, help='输入文件路径')
    single_parser.add_argument('--output', type=str, required=True, help='输出文件路径')
    single_parser.add_argument('--text_column', type=str, required=True, help='文本列名')
    single_parser.add_argument('--batch_size', type=int, default=16, help='批处理大小')
    single_parser.add_argument('--device', type=str, default=None, help='运行设备 (cuda/cpu)')

    # 'ensemble': predict a CSV file with an ensemble of models.
    ensemble_parser = subparsers.add_parser('ensemble', help='使用集成模型预测')
    ensemble_parser.add_argument('--ensemble_path', type=str, required=True, help='集成模型路径')
    ensemble_parser.add_argument('--input', type=str, required=True, help='输入文件路径')
    ensemble_parser.add_argument('--output', type=str, required=True, help='输出文件路径')
    ensemble_parser.add_argument('--text_column', type=str, required=True, help='文本列名')
    ensemble_parser.add_argument('--batch_size', type=int, default=16, help='批处理大小')

    # 'text': predict a single text string and print the result.
    text_parser = subparsers.add_parser('text', help='预测单个文本')
    text_parser.add_argument('--model_path', type=str, required=True, help='模型路径')
    text_parser.add_argument('--text', type=str, required=True, help='要预测的文本')
    text_parser.add_argument('--device', type=str, default=None, help='运行设备 (cuda/cpu)')

    args = parser.parse_args()

    if args.command == 'single':
        # Predict a file using one model.
        logger.info(f"尝试加载模型路径: {os.path.abspath(args.model_path)}")
        logger.info(
            f"该目录下的文件: {os.listdir(args.model_path) if os.path.exists(args.model_path) else '路径不存在'}")
        predictor = ModelPredictor(args.model_path, device=args.device)
        predictor.predict_file(args.input, args.output, args.text_column, args.batch_size)

    elif args.command == 'ensemble':
        # Predict a file using the ensemble.
        logger.info(f"尝试加载集成模型路径: {os.path.abspath(args.ensemble_path)}")
        logger.info(
            f"该目录下的文件: {os.listdir(args.ensemble_path) if os.path.exists(args.ensemble_path) else '路径不存在'}")
        predictors, weights = load_ensemble_model(args.ensemble_path)
        ensemble_predict_file(predictors, weights, args.input, args.output, args.text_column, args.batch_size)

    elif args.command == 'text':
        # Predict a single text.
        logger.info(f"尝试加载模型路径: {os.path.abspath(args.model_path)}")
        logger.info(
            f"该目录下的文件: {os.listdir(args.model_path) if os.path.exists(args.model_path) else '路径不存在'}")
        predictor = ModelPredictor(args.model_path, device=args.device)
        result = predictor.predict(args.text)

        # Print the prediction summary.
        print(f"\n预测文本: {args.text}")
        print(f"预测情感: {result['predicted_sentiment']}")
        print(f"置信度: {result['confidence']:.4f}")

        # Print each label's probability.
        label_map = predictor.config.get('label_map', ['负面', '中性', '正面'])
        for label in label_map:
            print(f"{label}概率: {result.get(f'{label}_prob', 0.0):.4f}")

    else:
        # No (or unknown) sub-command: show usage.
        parser.print_help()


# Script entry point.
if __name__ == "__main__":
    main()
