#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
XOS AI模型训练脚本
基于BIRD数据集和XOS系统自定义SQL数据进行训练
目标：自然语言 -> SQL查询生成
"""

import json
import os
import logging
import pandas as pd
import numpy as np
from pathlib import Path
from typing import List, Dict, Tuple, Any
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import (
    T5ForConditionalGeneration,
    T5Tokenizer,
    TrainingArguments,
    Trainer
)
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')

# Logging setup
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class XOSConfig:
    """Training configuration for the XOS text-to-SQL model.

    Groups all path, model, and training hyper-parameters in one place so
    the data processor and trainer share a single source of truth.
    """
    def __init__(self):
        # --- Path configuration ---
        self.base_dir = Path(__file__).parent
        self.data_dir = self.base_dir / "src" / "data"
        self.bird_dir = self.base_dir / "src" / "datasets" / "bird"
        self.models_dir = self.base_dir / "models"
        # parents=True so a missing intermediate directory does not raise.
        self.models_dir.mkdir(parents=True, exist_ok=True)

        # --- Model configuration (GPU-optimized) ---
        self.model_name = "google/flan-t5-base"  # base seq2seq model
        self.max_input_length = 512    # max tokens for the NL prompt
        self.max_target_length = 256   # max tokens for the generated SQL
        self.batch_size = 16           # larger batch to utilize the GPU
        self.num_epochs = 5            # fewer epochs; GPU training is faster
        self.learning_rate = 5e-4      # higher LR paired with short warmup
        self.warmup_steps = 500        # linear warmup steps

        # --- Training/eval configuration (GPU-optimized) ---
        # Split ratios; the three values are expected to sum to 1.0.
        self.train_ratio = 0.8
        self.val_ratio = 0.1
        self.test_ratio = 0.1
        self.save_steps = 500    # checkpoint frequency (steps)
        self.eval_steps = 250    # evaluation frequency (steps)
        self.logging_steps = 50  # logging frequency (steps)

class XOSDataProcessor:
    """Loads XOS custom data plus the BIRD dataset and converts both into
    (input, target) text pairs suitable for seq2seq training."""

    def __init__(self, config: XOSConfig):
        self.config = config

    def load_xos_data(self) -> List[Dict[str, Any]]:
        """Load XOS-system-specific training samples.

        Combines a fixed set of hand-written question/SQL pairs with the
        query patterns read from ``sql_patterns.json``.

        Returns:
            A list of sample dicts with ``question``/``sql``/``db_id``/
            ``evidence`` keys; empty if the data files cannot be read
            (the error is logged, not raised).
        """
        logger.info("Loading XOS custom data...")
        xos_data = []

        try:
            # NOTE(review): complete_data is loaded but never used below;
            # the read is kept because a missing file intentionally aborts
            # this loader via the except clause. Confirm whether these
            # samples should also be merged into the result.
            with open(self.config.data_dir / "complete_ai_training_data.json", 'r', encoding='utf-8') as f:
                complete_data = json.load(f)

            # Reusable SQL query patterns keyed by pattern name.
            with open(self.config.data_dir / "sql_patterns.json", 'r', encoding='utf-8') as f:
                sql_patterns = json.load(f)

            # Hand-written XOS-specific training samples.
            xos_samples = [
                {
                    "question": "查询所有用户的基本信息",
                    "sql": "SELECT id, username, email, real_name, phone, status FROM users WHERE status = 'active'",
                    "db_id": "xos_system",
                    "evidence": "查询活跃用户的基本信息字段"
                },
                {
                    "question": "获取用户的角色信息",
                    "sql": "SELECT u.username, r.name as role_name FROM users u JOIN user_roles ur ON u.id = ur.user_id JOIN roles r ON ur.role_id = r.id WHERE u.username = ?",
                    "db_id": "xos_system",
                    "evidence": "通过用户名查询用户的角色信息"
                },
                {
                    "question": "查看用户的所有权限",
                    "sql": "SELECT u.username, p.code as permission_code, p.name as permission_name FROM users u JOIN user_roles ur ON u.id = ur.user_id JOIN roles r ON ur.role_id = r.id JOIN role_permissions rp ON r.id = rp.role_id JOIN permissions p ON rp.permission_id = p.id WHERE u.username = ?",
                    "db_id": "xos_system",
                    "evidence": "查询指定用户的所有权限信息"
                },
                {
                    "question": "获取动态表格配置",
                    "sql": "SELECT tc.*, tc2.* FROM table_configs tc JOIN table_columns tc2 ON tc.id = tc2.table_config_id WHERE tc.table_name = ? ORDER BY tc2.sort_order",
                    "db_id": "xos_system",
                    "evidence": "查询指定表的配置和列信息"
                },
                {
                    "question": "查询产品库存信息",
                    "sql": "SELECT p.name, p.sku, i.current_stock, i.min_stock FROM products p JOIN inventory i ON p.id = i.product_id WHERE i.current_stock < i.min_stock",
                    "db_id": "xos_system",
                    "evidence": "查询库存不足的产品信息"
                },
                {
                    "question": "获取客户订单统计",
                    "sql": "SELECT c.name, COUNT(o.id) as order_count, SUM(o.total_amount) as total_amount FROM customers c JOIN orders o ON c.id = o.customer_id GROUP BY c.id, c.name ORDER BY total_amount DESC",
                    "db_id": "xos_system",
                    "evidence": "按客户统计订单数量和总金额"
                },
                {
                    "question": "查看最近的系统日志",
                    "sql": "SELECT * FROM system_logs WHERE created_at >= DATE_SUB(NOW(), INTERVAL 24 HOUR) ORDER BY created_at DESC LIMIT 100",
                    "db_id": "xos_system",
                    "evidence": "查询最近24小时的系统日志"
                }
            ]

            # Turn each SQL pattern into an additional training sample.
            for pattern_key, pattern_info in sql_patterns.items():
                xos_samples.append({
                    "question": pattern_info["description"],
                    "sql": pattern_info["pattern"],
                    "db_id": "xos_system",
                    "evidence": f"XOS系统{pattern_info['description']}"
                })

            xos_data.extend(xos_samples)
            logger.info(f"Loaded {len(xos_data)} XOS custom samples")

        except Exception as e:
            # Best-effort loader: log and fall through with whatever we have.
            logger.error(f"Error loading XOS data: {e}")

        return xos_data

    def load_bird_data(self, limit: int = None) -> List[Dict[str, Any]]:
        """Load the BIRD dataset (train + dev splits).

        Args:
            limit: optional cap on the number of training samples; the dev
                split is additionally capped at ``limit // 10``.

        Returns:
            The combined list of BIRD samples; missing files are skipped.
        """
        logger.info("Loading BIRD dataset...")
        bird_data = []

        try:
            # Training split.
            train_file = self.config.bird_dir / "train" / "train.json"
            if train_file.exists():
                with open(train_file, 'r', encoding='utf-8') as f:
                    train_data = json.load(f)

                if limit:
                    train_data = train_data[:limit]

                bird_data.extend(train_data)
                logger.info(f"Loaded {len(train_data)} BIRD training samples")

            # Dev split (use a tenth of the cap to keep it small).
            dev_file = self.config.bird_dir / "dev_20240627" / "dev.json"
            if dev_file.exists():
                with open(dev_file, 'r', encoding='utf-8') as f:
                    dev_data = json.load(f)

                if limit:
                    dev_data = dev_data[:limit//10]

                bird_data.extend(dev_data)
                logger.info(f"Loaded {len(dev_data)} BIRD dev samples")

        except Exception as e:
            logger.error(f"Error loading BIRD data: {e}")

        return bird_data

    def preprocess_data(self, data: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Convert raw samples into {"input", "target"} training pairs.

        Builds a prompt from the question plus optional evidence/db_id
        context; the target is the gold SQL (BIRD uses the "SQL" key,
        XOS samples use "sql"). Samples without a question or SQL are
        dropped.
        """
        logger.info("Preprocessing data...")
        processed_data = []

        for item in data:
            try:
                question = item.get("question", "")
                evidence = item.get("evidence", "")
                db_id = item.get("db_id", "")

                # Prompt: question plus optional context hints.
                input_text = f"Generate SQL for: {question}"
                if evidence:
                    input_text += f" Evidence: {evidence}"
                if db_id:
                    input_text += f" Database: {db_id}"

                # BIRD stores the gold query under "SQL", XOS under "sql".
                target_sql = item.get("SQL", item.get("sql", ""))

                # FIX: the previous check tested input_text, which is always
                # truthy because of the literal prefix; require a non-empty
                # question so prompt-only samples are actually filtered out.
                if question and target_sql:
                    processed_data.append({
                        "input": input_text,
                        "target": target_sql
                    })

            except Exception as e:
                logger.warning(f"Error processing item: {e}")
                continue

        logger.info(f"Processed {len(processed_data)} samples")
        return processed_data

class XOSDataset(Dataset):
    """Torch dataset yielding tokenized (prompt, SQL) pairs for seq2seq training."""

    def __init__(self, data: List[Dict[str, str]], tokenizer, max_input_length: int, max_target_length: int):
        self.data = data
        self.tokenizer = tokenizer
        self.max_input_length = max_input_length
        self.max_target_length = max_target_length

    def __len__(self):
        return len(self.data)

    def _encode(self, text, limit):
        """Tokenize *text*, padded and truncated to exactly *limit* tokens."""
        return self.tokenizer(
            text,
            max_length=limit,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )

    def __getitem__(self, idx):
        sample = self.data[idx]

        source = self._encode(sample["input"], self.max_input_length)
        target = self._encode(sample["target"], self.max_target_length)

        # Squeeze away the batch dimension added by return_tensors="pt".
        return {
            "input_ids": source["input_ids"].flatten(),
            "attention_mask": source["attention_mask"].flatten(),
            "labels": target["input_ids"].flatten(),
        }

class XOSTrainer:
    """Trains and evaluates the XOS natural-language-to-SQL model on GPU."""

    def __init__(self, config: XOSConfig):
        """Load tokenizer/model and move the model to the GPU.

        Args:
            config: Training configuration.

        Raises:
            RuntimeError: if no CUDA device is available (this script is
                GPU-only by design).
        """
        self.config = config

        if not torch.cuda.is_available():
            raise RuntimeError("CUDA is not available! Please install CUDA or use CPU training.")

        self.device = torch.device("cuda")
        gpu_name = torch.cuda.get_device_name(0)
        gpu_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)  # GB
        logger.info(f"Using GPU: {gpu_name} ({gpu_memory:.1f}GB)")

        # Drop cached allocations before loading the model weights.
        torch.cuda.empty_cache()

        # Initialize tokenizer and model.
        self.tokenizer = T5Tokenizer.from_pretrained(config.model_name)
        self.model = T5ForConditionalGeneration.from_pretrained(config.model_name)
        self.model.to(self.device)

        # FIX: removed self.model.half(). TrainingArguments below enables
        # fp16=True, i.e. AMP autocast + GradScaler, which requires fp32
        # master weights; pre-halving the model breaks loss scaling and is
        # a known cause of NaN losses when fine-tuning T5.

    def prepare_data(self):
        """Load, preprocess and split the data; build the three datasets.

        Populates ``self.train_dataset`` / ``self.val_dataset`` /
        ``self.test_dataset``.

        Returns:
            int: number of training samples.
        """
        logger.info("Preparing training data...")

        processor = XOSDataProcessor(self.config)

        # Load both data sources (cap the BIRD volume).
        xos_data = processor.load_xos_data()
        bird_data = processor.load_bird_data(limit=10000)

        all_data = xos_data + bird_data
        logger.info(f"Total samples: {len(all_data)}")

        processed_data = processor.preprocess_data(all_data)

        # First carve off the training portion, then split the remainder
        # into validation and test according to the configured ratios.
        train_data, temp_data = train_test_split(
            processed_data,
            test_size=1-self.config.train_ratio,
            random_state=42
        )

        val_data, test_data = train_test_split(
            temp_data,
            test_size=self.config.test_ratio/(self.config.val_ratio + self.config.test_ratio),
            random_state=42
        )

        logger.info(f"Train: {len(train_data)}, Val: {len(val_data)}, Test: {len(test_data)}")

        self.train_dataset = XOSDataset(
            train_data, self.tokenizer,
            self.config.max_input_length, self.config.max_target_length
        )

        self.val_dataset = XOSDataset(
            val_data, self.tokenizer,
            self.config.max_input_length, self.config.max_target_length
        )

        self.test_dataset = XOSDataset(
            test_data, self.tokenizer,
            self.config.max_input_length, self.config.max_target_length
        )

        return len(train_data)

    def train(self):
        """Run the full fine-tuning loop and save the final model.

        Returns:
            Trainer: the fitted Hugging Face trainer (best checkpoint
            loaded at the end, per ``load_best_model_at_end``).
        """
        logger.info("Starting model training...")

        self.prepare_data()

        # GPU-optimized training arguments.
        training_args = TrainingArguments(
            output_dir=str(self.config.models_dir / "xos_sql_generator"),
            num_train_epochs=self.config.num_epochs,
            per_device_train_batch_size=self.config.batch_size,
            per_device_eval_batch_size=self.config.batch_size,
            learning_rate=self.config.learning_rate,
            warmup_steps=self.config.warmup_steps,
            logging_steps=self.config.logging_steps,
            eval_steps=self.config.eval_steps,
            save_steps=self.config.save_steps,
            eval_strategy="steps",
            save_strategy="steps",
            load_best_model_at_end=True,
            metric_for_best_model="eval_loss",
            greater_is_better=False,
            save_total_limit=2,  # keep only the two most recent checkpoints
            dataloader_pin_memory=True,  # pinned memory speeds host->GPU copies
            gradient_accumulation_steps=1,  # batch fits in GPU memory as-is
            fp16=True,  # mixed-precision training via AMP
            dataloader_num_workers=4,  # parallel data loading
            remove_unused_columns=False,
            optim="adamw_torch",  # PyTorch AdamW implementation
            lr_scheduler_type="linear",
            # FIX: report_to=None means "all installed integrations" in
            # transformers; "none" is what actually disables wandb etc.
            report_to="none",
        )

        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=self.train_dataset,
            eval_dataset=self.val_dataset,
            tokenizer=self.tokenizer,
        )

        trainer.train()

        # Save the final (best) model plus tokenizer for inference.
        final_model_path = self.config.models_dir / "xos_sql_generator_final"
        trainer.save_model(str(final_model_path))
        self.tokenizer.save_pretrained(str(final_model_path))

        logger.info(f"Training completed! Model saved to {final_model_path}")

        return trainer

    def evaluate(self, trainer):
        """Evaluate the trained model on the held-out test set.

        Args:
            trainer: the fitted Trainer returned by :meth:`train`.

        Returns:
            dict: metrics from ``trainer.evaluate``.
        """
        logger.info("Evaluating model...")

        eval_results = trainer.evaluate(self.test_dataset)
        logger.info(f"Test results: {eval_results}")

        # Qualitative spot-check on a few fixed prompts.
        self.generate_examples()

        return eval_results

    def generate_examples(self):
        """Generate and log SQL for a few fixed example prompts."""
        logger.info("Generating example SQLs...")

        examples = [
            "Generate SQL for: 查询所有活跃用户 Database: xos_system",
            "Generate SQL for: 获取用户角色信息 Database: xos_system",
            "Generate SQL for: 查看库存不足的产品 Database: xos_system",
            "Generate SQL for: 统计客户订单数量 Database: xos_system"
        ]

        self.model.eval()
        with torch.no_grad():
            for example in examples:
                inputs = self.tokenizer(
                    example,
                    return_tensors="pt",
                    max_length=self.config.max_input_length,
                    truncation=True
                ).to(self.device)

                # Beam-search sampling: 4 beams with mild temperature.
                outputs = self.model.generate(
                    **inputs,
                    max_length=self.config.max_target_length,
                    num_beams=4,
                    temperature=0.7,
                    do_sample=True
                )

                generated_sql = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
                logger.info(f"Input: {example}")
                logger.info(f"Generated SQL: {generated_sql}\n")

def check_gpu_setup():
    """Verify CUDA availability and log every visible GPU's specs.

    Returns:
        bool: True if CUDA is usable, False otherwise.
    """
    if not torch.cuda.is_available():
        logger.error("❌ CUDA不可用！请确保：")
        logger.error("1. 安装了支持CUDA的PyTorch版本")
        logger.error("2. 系统安装了NVIDIA驱动")
        logger.error("3. 安装了CUDA Toolkit")
        return False

    # FIX: track the smallest memory across ALL GPUs instead of reading the
    # loop variable after the loop (which checked only the last device and
    # raised NameError if the loop body never ran).
    min_gpu_memory = float("inf")
    for i in range(torch.cuda.device_count()):
        gpu_name = torch.cuda.get_device_name(i)
        gpu_memory = torch.cuda.get_device_properties(i).total_memory / (1024**3)
        min_gpu_memory = min(min_gpu_memory, gpu_memory)
        logger.info(f"🚀 GPU {i}: {gpu_name} ({gpu_memory:.1f}GB)")

    # Warn when any device has little memory headroom for the batch size.
    if min_gpu_memory < 4:
        logger.warning("⚠️  GPU内存较小，建议减小batch_size")

    return True

def main():
    """Entry point: validate the GPU environment, then train and evaluate."""
    logger.info("Starting XOS AI Model Training...")

    # Abort early when the CUDA environment is not usable.
    if not check_gpu_setup():
        logger.error("GPU检查失败，请修复GPU环境后重试")
        return

    trainer_instance = XOSTrainer(XOSConfig())

    try:
        hf_trainer = trainer_instance.train()
        trainer_instance.evaluate(hf_trainer)
        logger.info("XOS AI Model Training completed successfully!")
    except Exception as e:
        # Log the failure, then let it propagate to the caller.
        logger.error(f"Training failed: {e}")
        raise

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
