"""
问题分类模型
基于BERT的文本分类模型，自动识别问题类型和严重程度
"""

import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import logging
from typing import Dict, List, Tuple, Optional
from modelscope import (
    AutoTokenizer, AutoModel, AutoConfig,
    Model, snapshot_download
)
from modelscope.trainers import build_trainer
from modelscope.utils.constant import Tasks
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns
from config.model_config import config

class SupervisionDataset(Dataset):
    """Torch dataset wrapping supervision texts and their encoded labels.

    Each item is tokenized on the fly and returned as fixed-length tensors
    suitable for a BERT-style classifier.
    """

    def __init__(self, texts: List[str], labels: List, tokenizer, max_length: int = 512):
        """
        Args:
            texts: raw input strings.
            labels: integer-encoded label(s) per sample — must be convertible
                to ``torch.long`` (a plain int, or e.g. ``[type_id, severity_id]``
                for the multi-task model). NOTE: the previous ``List[str]``
                annotation was wrong: ``torch.tensor(label, dtype=torch.long)``
                rejects raw strings.
            tokenizer: a HuggingFace/ModelScope-style tokenizer callable.
            max_length: pad/truncate every sample to this many tokens.
        """
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self) -> int:
        return len(self.texts)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        # str() guards against non-string entries (e.g. NaN read via pandas).
        text = str(self.texts[idx])
        label = self.labels[idx]

        encoding = self.tokenizer(
            text,
            truncation=True,
            padding='max_length',
            max_length=self.max_length,
            return_tensors='pt'
        )

        # The tokenizer returns (1, max_length) tensors; flatten to
        # (max_length,) so default DataLoader collation stacks them correctly.
        return {
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'labels': torch.tensor(label, dtype=torch.long)
        }

class MultiTaskBERTClassifier(nn.Module):
    """BERT encoder with two parallel classification heads.

    One linear head predicts the problem-type class, the other the severity
    level, both operating on the pooled encoder output. When ``labels`` are
    supplied to ``forward``, the summed cross-entropy loss over both tasks
    is returned as well.
    """

    def __init__(self, model_name: str, num_problem_types: int, num_severity_levels: int, dropout_rate: float = 0.3):
        super().__init__()

        # Pretrained backbone loaded through ModelScope.
        self.bert = AutoModel.from_pretrained(model_name)
        self.dropout = nn.Dropout(dropout_rate)

        hidden = self.bert.config.hidden_size
        # Task-specific heads share the same pooled representation.
        self.problem_type_classifier = nn.Linear(hidden, num_problem_types)
        self.severity_classifier = nn.Linear(hidden, num_severity_levels)

    def forward(self, input_ids, attention_mask, labels=None):
        """Run both heads.

        ``labels``, if given, is expected to be a (batch, 2) long tensor:
        column 0 holds problem-type targets, column 1 severity targets.
        """
        encoder_out = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled = self.dropout(encoder_out.pooler_output)

        problem_type_logits = self.problem_type_classifier(pooled)
        severity_logits = self.severity_classifier(pooled)

        loss = None
        if labels is not None:
            criterion = nn.CrossEntropyLoss()
            # Joint objective: simple unweighted sum of the two task losses.
            loss = (criterion(problem_type_logits, labels[:, 0])
                    + criterion(severity_logits, labels[:, 1]))

        return {
            'loss': loss,
            'problem_type_logits': problem_type_logits,
            'severity_logits': severity_logits
        }

class ProblemClassificationModel:
    """End-to-end wrapper around the multi-task problem classifier.

    Owns the tokenizer, the two label encoders, and (once trained or loaded
    elsewhere) the model, and exposes a simple ``predict`` API over raw text.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.model = None       # populated by training/loading, not here
        self.tokenizer = None
        self.problem_type_encoder = LabelEncoder()
        self.severity_encoder = LabelEncoder()
        self.model_config = config.problem_classification

        # Prefer GPU when available.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.logger.info(f"使用设备: {self.device}")

        self._initialize_tokenizer()

    def _initialize_tokenizer(self):
        """Load the configured tokenizer, falling back to a Chinese StructBERT."""
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_config.base_model)
            self.logger.info(f"已加载tokenizer: {self.model_config.base_model}")
        except Exception as e:
            self.logger.error(f"加载tokenizer失败: {e}")
            # Fallback: ModelScope Chinese StructBERT backbone tokenizer.
            self.tokenizer = AutoTokenizer.from_pretrained("damo/nlp_structbert_backbone_base_std")

    def predict(self, texts: List[str]) -> List[Dict]:
        """Predict problem type and severity for each input text.

        Args:
            texts: raw input strings.

        Returns:
            One dict per text containing the original text, the decoded
            ``predicted_problem_type`` / ``predicted_severity``, and the
            softmax confidence of each head.

        Raises:
            ValueError: if the model has not been trained/loaded yet.
        """
        if self.model is None or self.tokenizer is None:
            raise ValueError("模型尚未训练，请先调用train()方法")

        # BUGFIX: force inference mode. Without this, dropout stays active
        # if the model was left in training mode, making predictions
        # nondeterministic even under torch.no_grad().
        self.model.eval()

        results = []

        for text in texts:
            # Tokenize to fixed length, same settings as training.
            encoding = self.tokenizer(
                text,
                truncation=True,
                padding='max_length',
                max_length=self.model_config.max_length,
                return_tensors='pt'
            )

            input_ids = encoding['input_ids'].to(self.device)
            attention_mask = encoding['attention_mask'].to(self.device)

            with torch.no_grad():
                outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)

                problem_type_probs = torch.softmax(outputs['problem_type_logits'], dim=-1)
                severity_probs = torch.softmax(outputs['severity_logits'], dim=-1)

                # Batch size is 1 here, so argmax yields a single index.
                problem_type_pred = torch.argmax(problem_type_probs, dim=-1).item()
                severity_pred = torch.argmax(severity_probs, dim=-1).item()

                # Map class indices back to their original string labels.
                predicted_problem_type = self.problem_type_encoder.inverse_transform([problem_type_pred])[0]
                predicted_severity = self.severity_encoder.inverse_transform([severity_pred])[0]

                results.append({
                    'text': text,
                    'predicted_problem_type': predicted_problem_type,
                    'predicted_severity': predicted_severity,
                    'problem_type_confidence': float(torch.max(problem_type_probs).cpu()),
                    'severity_confidence': float(torch.max(severity_probs).cpu())
                })

        return results