import json
import os

import torch
from torch.utils.data import Dataset
from transformers import (
    DataCollatorForSeq2Seq,
    T5ForConditionalGeneration,
    T5Tokenizer,
    Trainer,
    TrainingArguments,
)

# 训练配置
class Config:
    def __init__(self):
        self.model_name = 't5-base'
        self.cache_dir = './cache'
        self.output_dir = './output'
        self.num_train_epochs = 3
        self.batch_size = 8
        self.learning_rate = 5e-5
        self.warmup_steps = 500
        self.save_steps = 1000
        self.eval_steps = 1000
        self.max_length = 512

config = Config()

# 数据处理类
class CypherDataset(Dataset):
    def __init__(self, data, schemas, tokenizer, max_length):
        self.data = data
        self.schemas = schemas
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        input_text = item['input']
        target_text = item['output']
        inputs = self.tokenizer(
            input_text,
            max_length=self.max_length,
            truncation=True,
            return_tensors='pt'
        )
        targets = self.tokenizer(
            target_text,
            max_length=self.max_length,
            truncation=True,
            return_tensors='pt'
        )
        return {
            'input_ids': inputs['input_ids'].squeeze(),
            'attention_mask': inputs['attention_mask'].squeeze(),
            'labels': targets['input_ids'].squeeze()
        }

def load_schemas():
    base_path = r"E:\ccfcom\bdci\schema"
    schema_files = [
        'YAGO.JSON',
        'THE_THREE_BODY.JSON',
        'MOVIE.JSON',
        'FINBENCH.JSON'
    ]
    schemas = {}
    # 加载 schema 文件
    for file in schema_files:
        file_path = os.path.join(base_path, file)
        with open(file_path, 'r', encoding='utf-8') as f:
            schema_name = os.path.splitext(file)[0].lower()
            schemas[schema_name] = json.load(f)
    # 添加 common 类型的基础 schema
    schemas['common'] = [
        {
            "label": "person",
            "type": "VERTEX",
            "properties": [
                {"name": "name", "type": "STRING"},
                {"name": "born", "type": "INT32", "optional": True},
                {"name": "belt", "type": "STRING", "optional": True},
                {"name": "age", "type": "INT32", "optional": True}
            ]
        },
        {
            "label": "movie",
            "type": "VERTEX",
            "properties": [
                {"name": "title", "type": "STRING"},
                {"name": "released", "type": "INT32", "optional": True}
            ]
        },
        {
            "label": "acted_in",
            "type": "EDGE",
            "properties": [
                {"name": "role", "type": "STRING", "optional": True}
            ]
        }
    ]
    return schemas

def train():
    # 加载 schemas
    schemas = load_schemas()
    if not schemas:
        raise ValueError("Schemas could not be loaded.")

    # 提供完整的训练数据文件路径
    train_data_path = r"E:\ccfcom\bdci\schema\train_cypher.json"

    # 加载训练数据
    with open(train_data_path, 'r', encoding='utf-8') as f:
        train_data = json.load(f)

    # 初始化模型和 tokenizer
    tokenizer = T5Tokenizer.from_pretrained(
        config.model_name,
        cache_dir=config.cache_dir
    )
    model = T5ForConditionalGeneration.from_pretrained(
        config.model_name,
        cache_dir=config.cache_dir
    )

    # 创建数据集
    train_dataset = CypherDataset(
        train_data,
        schemas,
        tokenizer,
        config.max_length
    )

    # 训练参数
    training_args = TrainingArguments(
        output_dir=config.output_dir,
        num_train_epochs=config.num_train_epochs,
        per_device_train_batch_size=config.batch_size,
        learning_rate=config.learning_rate,
        warmup_steps=config.warmup_steps,
        save_steps=config.save_steps,
        evaluation_strategy='no',
        eval_steps=config.eval_steps,
        weight_decay=0.01,
        save_total_limit=2
    )

    # 创建训练器
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=DataCollatorForSeq2Seq(tokenizer)
    )

    # 开始训练
    trainer.train()

    # 保存模型
    trainer.save_model(config.output_dir)
    tokenizer.save_pretrained(config.output_dir)

def generate_predictions():
    # 加载 schemas
    schemas = load_schemas()
    if not schemas:
        raise ValueError("Schemas could not be loaded.")

    # 加载模型和 tokenizer
    tokenizer = T5Tokenizer.from_pretrained(config.output_dir)
    model = T5ForConditionalGeneration.from_pretrained(config.output_dir)

    # 提供完整的测试数据文件路径
    test_data_path = r"E:\ccfcom\bdci\schema\test_cypher.json"

    # 加载测试数据
    with open(test_data_path, 'r', encoding='utf-8') as f:
        test_data = json.load(f)

    # 生成预测
    predictions = []
    for idx, item in enumerate(test_data):
        input_text = item['question']
        inputs = tokenizer(
            input_text,
            return_tensors='pt',
            max_length=config.max_length,
            truncation=True
        )
        outputs = model.generate(
            input_ids=inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            max_length=config.max_length
        )
        # 解码预测结果
        predicted_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # 添加到结果列表
        predictions.append({
            "index": str(idx),
            "answer": predicted_text
        })

    # 保存预测结果
    with open('answer_cypher.json', 'w', encoding='utf-8') as f:
        json.dump(predictions, f, ensure_ascii=False, indent=2)

    print("预测结果已保存到 answer_cypher.json")

if __name__ == "__main__":
    train()
    generate_predictions()