#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
自定义分类训练器
提供简单的界面来训练用户自定义的分类模型
"""

import os
import json
from typing import List, Dict
from transformers import BertTokenizer, BertForSequenceClassification, AdamW
from torch.utils.data import Dataset, DataLoader
import torch


class CustomDataset(Dataset):
    """Torch dataset pairing raw texts with integer class labels.

    Tokenization is performed lazily in ``__getitem__`` so that only the
    samples actually requested by the DataLoader are encoded.
    """

    def __init__(self, texts: List[str], labels: List[int], tokenizer, max_length: int = 128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        """Return the number of samples."""
        return len(self.texts)

    def __getitem__(self, idx):
        """Encode sample ``idx`` and return model-ready tensors."""
        sample_text = str(self.texts[idx])
        sample_label = self.labels[idx]

        encoded = self.tokenizer.encode_plus(
            sample_text,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt',
        )

        item = {
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'label': torch.tensor(sample_label, dtype=torch.long),
        }
        return item


class CustomClassifierTrainer:
    """Trainer for a user-defined BERT text classifier.

    Wraps data preparation, fine-tuning, persistence, and inference for a
    single-label classifier built on ``bert-base-chinese`` (or a local
    BERT checkpoint).
    """

    def __init__(self, model_name: str = 'bert-base-chinese'):
        self.model_name = model_name
        self.model = None       # BertForSequenceClassification; set by train()/load_model()
        self.tokenizer = None   # BertTokenizer; set by train()/load_model()
        self.labels = []        # index -> label name
        self.label_map = {}     # label name -> index

    def prepare_data(self, training_data: List[Dict[str, str]]):
        """Prepare single-level training data.

        Args:
            training_data: list of dicts with 'text' and 'label' keys, e.g.
                [{'text': '在麦当劳用餐', 'label': '餐饮'},
                 {'text': '乘坐公交车', 'label': '交通'}]

        Returns:
            Tuple ``(texts, label_ids)`` suitable for CustomDataset.
        """
        # Collect unique labels and assign each a stable integer id.
        # NOTE: set iteration order varies between interpreter runs, so the
        # id assignment is only stable within one process; the mapping is
        # persisted alongside the model in train() for this reason.
        self.labels = list(set(item['label'] for item in training_data))
        self.label_map = {label: i for i, label in enumerate(self.labels)}

        texts = [item['text'] for item in training_data]
        label_ids = [self.label_map[item['label']] for item in training_data]

        return texts, label_ids

    def train(self, training_data: List[Dict[str, str]],
              output_path: str = './model_cache/custom_classifier',
              epochs: int = 3,
              batch_size: int = 8,
              learning_rate: float = 2e-5,
              max_length: int = 128):
        """Fine-tune a BERT classifier on the given data and save it.

        Args:
            training_data: list of {'text': ..., 'label': ...} samples.
            output_path: directory where model, tokenizer and label mapping
                are saved.
            epochs: number of training epochs.
            batch_size: mini-batch size.
            learning_rate: AdamW learning rate.
            max_length: maximum token length per text.
        """
        texts, label_ids = self.prepare_data(training_data)

        # Prefer a locally cached base model when present to avoid a
        # network download; fall back to the hub model name otherwise.
        local_model_path = '../model_cache/bert-base-chinese'
        model_name_or_path = local_model_path if os.path.exists(local_model_path) else self.model_name

        try:
            self.tokenizer = BertTokenizer.from_pretrained(model_name_or_path, local_files_only=True)
            self.model = BertForSequenceClassification.from_pretrained(
                model_name_or_path,
                num_labels=len(self.labels),
                local_files_only=True,
                ignore_mismatched_sizes=True
            )
        except Exception as e:
            print(f"加载模型时出错: {e}")
            # Retry with an absolute path — relative paths depend on the
            # current working directory and are a common failure mode here.
            abs_model_path = os.path.abspath(model_name_or_path)
            print(f"尝试使用绝对路径: {abs_model_path}")
            self.tokenizer = BertTokenizer.from_pretrained(abs_model_path, local_files_only=True)
            self.model = BertForSequenceClassification.from_pretrained(
                abs_model_path,
                num_labels=len(self.labels),
                local_files_only=True,
                ignore_mismatched_sizes=True
            )

        dataset = CustomDataset(texts, label_ids, self.tokenizer, max_length)
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

        # Use torch's AdamW: transformers.AdamW has been deprecated since
        # transformers 4.x and removed in v5; torch.optim.AdamW is the
        # recommended drop-in replacement for this usage.
        optimizer = torch.optim.AdamW(self.model.parameters(), lr=learning_rate)

        self.model.train()
        for epoch in range(epochs):
            total_loss = 0
            for batch in dataloader:
                optimizer.zero_grad()

                outputs = self.model(
                    input_ids=batch['input_ids'],
                    attention_mask=batch['attention_mask'],
                    labels=batch['label']
                )
                loss = outputs.loss
                loss.backward()
                optimizer.step()

                total_loss += loss.item()

            avg_loss = total_loss / len(dataloader)
            print(f"Epoch: {epoch+1}/{epochs}, Average Loss: {avg_loss}")

        # Persist model, tokenizer and the label-id mapping together so that
        # load_model() can reconstruct the exact same class ordering.
        os.makedirs(output_path, exist_ok=True)
        self.model.save_pretrained(output_path)
        self.tokenizer.save_pretrained(output_path)

        label_mapping_path = os.path.join(output_path, 'label_mapping.json')
        with open(label_mapping_path, 'w', encoding='utf-8') as f:
            json.dump(self.label_map, f, ensure_ascii=False, indent=2)

        print(f"自定义分类模型已训练并保存到 {output_path}")
        print(f"标签映射已保存到 {label_mapping_path}")

    def load_model(self, model_path: str = './model_cache/custom_classifier'):
        """Load a previously trained model, tokenizer and label mapping.

        Args:
            model_path: model directory. The default now matches train()'s
                default output_path (previously '../model_cache/custom_classifier',
                which pointed outside the directory train() writes to, so a
                default-argument round trip failed).
        """
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        self.model = BertForSequenceClassification.from_pretrained(model_path, ignore_mismatched_sizes=True)

        # Rebuild the index -> label list from the persisted mapping.
        label_mapping_path = os.path.join(model_path, 'label_mapping.json')
        if os.path.exists(label_mapping_path):
            with open(label_mapping_path, 'r', encoding='utf-8') as f:
                self.label_map = json.load(f)
            self.labels = [None] * len(self.label_map)
            for label, idx in self.label_map.items():
                self.labels[idx] = label

    def _predict_label_id(self, text: str) -> int:
        """Encode ``text`` and return the argmax class id.

        Shared by predict() and predict_with_subcategories() to avoid
        duplicating the encode/softmax/argmax pipeline.

        Raises:
            ValueError: if no model/tokenizer has been loaded yet.
        """
        if self.model is None or self.tokenizer is None:
            raise ValueError("模型未加载，请先调用load_model方法")

        self.model.eval()
        encoding = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=128,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        with torch.no_grad():
            outputs = self.model(
                input_ids=encoding['input_ids'],
                attention_mask=encoding['attention_mask']
            )
            predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
            return torch.argmax(predictions, dim=-1).item()

    def predict(self, text: str) -> str:
        """Predict the label for a text.

        Args:
            text: text to classify.

        Returns:
            The predicted label name.

        Raises:
            ValueError: if the model has not been loaded.
        """
        return self.labels[self._predict_label_id(text)]

    def prepare_data_with_subcategories(self, training_data: List[Dict[str, str]]):
        """Prepare two-level (primary|secondary) training data.

        Args:
            training_data: list of dicts with 'text', 'primary_label' and
                'secondary_label' keys, e.g.
                [{'text': '在麦当劳用餐', 'primary_label': '餐饮', 'secondary_label': '外卖'},
                 {'text': '乘坐公交车', 'primary_label': '交通', 'secondary_label': '公交'}]

        Returns:
            Tuple ``(texts, label_ids)``; each label is the combined string
            "primary|secondary".
        """
        combined = [f"{item['primary_label']}|{item['secondary_label']}" for item in training_data]
        self.labels = list(set(combined))
        self.label_map = {label: i for i, label in enumerate(self.labels)}

        texts = [item['text'] for item in training_data]
        label_ids = [self.label_map[name] for name in combined]

        return texts, label_ids

    def predict_with_subcategories(self, text: str) -> tuple:
        """Predict the primary and secondary labels for a text.

        Args:
            text: text to classify.

        Returns:
            ``(primary_label, secondary_label)``; secondary is "其他" when
            the model was trained without subcategories.

        Raises:
            ValueError: if the model has not been loaded.
        """
        predicted_label = self.labels[self._predict_label_id(text)]
        if '|' in predicted_label:
            primary_label, secondary_label = predicted_label.split('|', 1)
            return primary_label, secondary_label
        else:
            return predicted_label, "其他"


def main():
    """Smoke-test the custom classifier trainer end to end."""
    # Tiny labelled corpus: two example texts per spending category.
    samples_by_label = {
        '餐饮': ['在麦当劳用餐', '购买午餐'],
        '交通': ['乘坐地铁', '打车回家'],
        '教育': ['购买书籍', '购买文具'],
        '娱乐': ['看电影', '购买游戏'],
        '购物': ['购买衣服', '购买电子产品'],
    }
    training_data = [
        {'text': text, 'label': label}
        for label, texts in samples_by_label.items()
        for text in texts
    ]

    # Train a model on the sample data.
    trainer = CustomClassifierTrainer()
    trainer.train(training_data, epochs=2)

    # Reload the saved model and run a single prediction.
    trainer.load_model('./model_cache/custom_classifier')
    test_text = '在肯德基吃饭'
    predicted_label = trainer.predict(test_text)
    print(f"文本 '{test_text}' 的预测分类是: {predicted_label}")


if __name__ == "__main__":
    main()