#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
XOS AI模型简化训练脚本
适合快速测试和小规模训练
"""

import json
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer, Trainer, TrainingArguments
from torch.utils.data import Dataset
import logging

# Module-level logging: INFO so training progress and generation samples are visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class SimpleXOSDataset(Dataset):
    """Seq2seq dataset mapping natural-language questions to SQL strings.

    Each item is a dict with ``input_ids``, ``attention_mask`` and ``labels``
    (1-D tensors of length ``max_length``), ready for a T5-style
    encoder-decoder model trained through the HF ``Trainer``.
    """

    def __init__(self, data, tokenizer, max_length=512):
        # data: list of {"question": str, "sql": str} pairs.
        # tokenizer: HF tokenizer; must expose `pad_token_id` (T5 tokenizers do).
        self.data = data
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]

        # Encode the prompt and the target SQL.
        input_text = f"生成SQL: {item['question']}"
        target_text = item['sql']

        input_ids = self.tokenizer(
            input_text,
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        )

        target_ids = self.tokenizer(
            target_text,
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        )

        # Bug fix: padded positions in the labels must be set to -100 so the
        # loss ignores them. Leaving raw pad-token ids (the original behavior)
        # trains the model to emit padding and lets pad positions dominate
        # the loss on short SQL targets.
        labels = target_ids["input_ids"].flatten().clone()
        labels[labels == self.tokenizer.pad_token_id] = -100

        return {
            "input_ids": input_ids["input_ids"].flatten(),
            "attention_mask": input_ids["attention_mask"].flatten(),
            "labels": labels
        }

def create_sample_data():
    """Return a small built-in list of question/SQL training pairs."""
    questions = [
        "查询所有用户信息",
        "获取用户的角色",
        "查看产品库存",
        "统计订单数量",
        "查找低库存产品",
    ]
    sql_statements = [
        "SELECT * FROM users WHERE status = 'active'",
        "SELECT u.username, r.name FROM users u JOIN user_roles ur ON u.id = ur.user_id JOIN roles r ON ur.role_id = r.id",
        "SELECT p.name, i.current_stock FROM products p JOIN inventory i ON p.id = i.product_id",
        "SELECT COUNT(*) as order_count FROM orders WHERE created_at >= CURDATE()",
        "SELECT p.name FROM products p JOIN inventory i ON p.id = i.product_id WHERE i.current_stock < i.min_stock",
    ]
    return [
        {"question": q, "sql": s}
        for q, s in zip(questions, sql_statements)
    ]

def _load_training_data():
    """Build the training set: built-in samples plus optional JSON patterns.

    Returns a list of {"question": str, "sql": str} dicts. Missing or
    unreadable pattern files are tolerated (best-effort augmentation).
    """
    train_data = create_sample_data()
    try:
        with open("src/data/sql_patterns.json", 'r', encoding='utf-8') as f:
            sql_patterns = json.load(f)
    except (OSError, json.JSONDecodeError):
        # Narrowed from a bare `except:` so real bugs (NameError,
        # KeyboardInterrupt, ...) are no longer silently swallowed.
        logger.warning("无法加载sql_patterns.json，使用示例数据")
        return train_data
    for pattern in sql_patterns.values():
        # Skip malformed entries instead of aborting the whole load.
        if "description" in pattern and "pattern" in pattern:
            train_data.append({
                "question": pattern["description"],
                "sql": pattern["pattern"]
            })
    return train_data

def _test_generation(model, tokenizer):
    """Smoke-test the fine-tuned model on a few unseen questions and log the SQL."""
    test_questions = [
        "查询所有活跃用户",
        "获取用户权限信息",
        "统计今日订单"
    ]
    model.eval()
    for question in test_questions:
        input_text = f"生成SQL: {question}"
        inputs = tokenizer(input_text, return_tensors="pt")
        with torch.no_grad():
            outputs = model.generate(**inputs, max_length=128, num_beams=2)
        generated_sql = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Lazy %-style args: formatting is skipped when the level is disabled.
        logger.info("问题: %s", question)
        logger.info("生成SQL: %s\n", generated_sql)

def main():
    """Run the end-to-end simplified fine-tune: load, train, save, smoke-test."""
    logger.info("开始简化训练...")

    # Small model keeps the run fast enough for a laptop-scale sanity check.
    model_name = "google/flan-t5-small"
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)

    dataset = SimpleXOSDataset(_load_training_data(), tokenizer)

    training_args = TrainingArguments(
        output_dir="./models/xos_simple",
        num_train_epochs=3,
        per_device_train_batch_size=2,
        learning_rate=5e-4,
        logging_steps=10,
        save_steps=100,
        save_total_limit=2,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset,
        tokenizer=tokenizer,
    )
    trainer.train()

    # Persist final weights separately from the checkpoint directory above.
    trainer.save_model("./models/xos_simple_final")
    tokenizer.save_pretrained("./models/xos_simple_final")
    logger.info("训练完成！")

    _test_generation(model, tokenizer)

# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()
