import os
import json
import torch
import numpy as np
from torch.utils.data import Dataset
from transformers import (
    T5ForConditionalGeneration, 
    T5Tokenizer,
    Trainer,
    TrainingArguments,
    DataCollatorForSeq2Seq
)

# 训练配置
# Training configuration
class Config:
    """Container for model paths and training hyper-parameters."""

    def __init__(self):
        # Assign every hyper-parameter from a single table so the full
        # configuration is visible at a glance.
        defaults = {
            "model_name": "t5-base",          # base checkpoint to fine-tune
            "cache_dir": "./model_cache",     # HF download cache
            "output_dir": "./results",        # Trainer checkpoints
            "num_train_epochs": 3,
            "batch_size": 4,
            "learning_rate": 5e-5,
            "max_length": 512,                # max token length for inputs/targets
            "warmup_steps": 100,
            "save_steps": 1000,
            "eval_steps": 1000,
        }
        for name, value in defaults.items():
            setattr(self, name, value)


config = Config()

# 数据处理类
# Data handling class
class CypherDataset(Dataset):
    """Dataset pairing (schema + question) prompts with Cypher targets.

    Each element of ``data`` is a dict with keys ``db_id``, ``input``
    (the natural-language question) and ``output`` (the gold Cypher query).
    ``schemas`` maps ``db_id`` to the schema text embedded in the prompt.
    """

    def __init__(self, data, schemas, tokenizer, max_length):
        self.data = data
        self.schemas = schemas
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]
        schema = self.schemas[item['db_id']]

        # Training items store the question under 'input' (not 'question').
        input_text = f"Schema: {schema}\nQuestion: {item['input']}"
        target_text = item['output']

        # Tokenize directly to PyTorch tensors; the previous numpy round-trip
        # (return_tensors="np" then torch.tensor(...)) was redundant.
        inputs = self.tokenizer(
            input_text,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors="pt"
        )

        targets = self.tokenizer(
            target_text,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors="pt"
        )

        labels = targets.input_ids.squeeze(0)
        # Replace padding token ids with -100 so the cross-entropy loss
        # ignores padded positions; without this the model is trained to
        # emit long runs of <pad> tokens.
        labels = labels.masked_fill(labels == self.tokenizer.pad_token_id, -100)

        return {
            'input_ids': inputs.input_ids.squeeze(0),
            'attention_mask': inputs.attention_mask.squeeze(0),
            'labels': labels
        }

def train():
    """Fine-tune T5 to translate natural-language questions into Cypher.

    Loads per-database schemas (from JSON files plus hard-coded fallbacks),
    builds a CypherDataset from ``train_cypher.json``, trains with the HF
    Trainer, and saves the resulting model/tokenizer to ``./cypher_model``.
    """
    base_path = r"E:\ccfcom\bdci\schema"

    # Load schemas, keyed by each file's 'db_id'.
    schemas = {}
    schema_files = [
        'YAGO.JSON',
        'THE_THREE_BODY.JSON',
        'MOVIE.JSON',
        'FINBENCH.JSON'
    ]

    # Read each schema file; missing files are warned about and skipped.
    for file in schema_files:
        file_path = os.path.join(base_path, file)
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                schema = json.load(f)
                schemas[schema['db_id']] = schema['schema']
        except FileNotFoundError:
            print(f"警告: 找不到文件 {file_path}")
            continue

    # Hard-coded fallback schema for the 'common' database.
    schemas['common'] = [
        {
            "label": "person",
            "type": "VERTEX",
            "properties": [
                {"name": "name", "type": "STRING"},
                {"name": "born", "type": "INT32", "optional": True},
                {"name": "belt", "type": "STRING", "optional": True},
                {"name": "age", "type": "INT32", "optional": True}
            ]
        },
        {
            "label": "movie",
            "type": "VERTEX",
            "properties": [
                {"name": "title", "type": "STRING"},
                {"name": "released", "type": "INT32", "optional": True}
            ]
        },
        {
            "label": "acted_in",
            "type": "EDGE",
            "properties": [
                {"name": "role", "type": "STRING", "optional": True}
            ]
        }
    ]
    # Hard-coded fallback schema for the 'movie' database.
    schemas['movie'] = [
        {
            "label": "person",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "name", "type": "STRING"},
                {"name": "born", "type": "INT32", "optional": True},
                {"name": "poster_image", "type": "STRING", "optional": True}
            ]
        },
        {
            "label": "genre",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "name", "type": "STRING"}
            ]
        },
        {
            "label": "keyword",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "name", "type": "STRING"}
            ]
        },
        {
            "label": "movie",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "title", "type": "STRING", "index": True},
                {"name": "tagline", "type": "STRING"},
                {"name": "summary", "type": "STRING", "optional": True},
                {"name": "poster_image", "type": "STRING", "optional": True},
                {"name": "duration", "type": "INT32"},
                {"name": "rated", "type": "STRING", "optional": True}
            ]
        },
        {
            "label": "user",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "login", "type": "STRING", "index": True}
            ]
        },
        {
            "label": "acted_in",
            "type": "EDGE",
            "properties": [
                {"name": "role", "type": "STRING"}
            ]
        },
        {
            "label": "rate",
            "type": "EDGE",
            "properties": [
                {"name": "stars", "type": "INT8"}
            ]
        },
        {"label": "directed", "type": "EDGE"},
        {"label": "is_friend", "type": "EDGE"},
        {"label": "has_genre", "type": "EDGE"},
        {"label": "has_keyword", "type": "EDGE"},
        {"label": "produce", "type": "EDGE"},
        {"label": "write", "type": "EDGE"}
    ]
    # NOTE(review): the key 'yoga' looks like a typo for 'yago' (the content
    # matches a YAGO-style schema) — confirm against the db_id values that
    # actually appear in train_cypher.json before changing it.
    schemas['yoga'] = [
        {
            "label": "Person",
            "type": "VERTEX",
            "primary": "name",
            "properties": [
                {"name": "name", "type": "STRING"},
                {"name": "age", "type": "INT16", "optional": True},
                {"name": "birthyear", "type": "INT16", "optional": True},
                {"name": "gender", "type": "INT8", "optional": True},
                {"name": "phone", "type": "INT16", "unique": False, "index": True}
            ]
        },
        {
            "label": "City",
            "type": "VERTEX",
            "primary": "name",
            "properties": [
                {"name": "name", "type": "STRING"},
                {"name": "area", "type": "DOUBLE"},
                {"name": "population", "type": "DOUBLE"}
            ]
        },
        {
            "label": "Film",
            "type": "VERTEX",
            "primary": "title",
            "properties": [
                {"name": "title", "type": "STRING"}
            ]
        },
        {"label": "HAS_CHILD", "type": "EDGE"},
        {"label": "MARRIED", "type": "EDGE"},
        {
            "label": "BORN_IN",
            "type": "EDGE",
            "properties": [
                {"name": "weight", "type": "FLOAT", "optional": True}
            ]
        },
        {
            "label": "KNOWS",
            "type": "EDGE",
            "properties": [
                {"name": "weight", "type": "FLOAT", "optional": True}
            ]
        },
        {"label": "DIRECTED", "type": "EDGE"},
        {"label": "WROTE_MUSIC_FOR", "type": "EDGE"},
        {
            "label": "ACTED_IN",
            "type": "EDGE",
            "properties": [
                {"name": "charactername", "type": "STRING"}
            ]
        }
    ]
    if not schemas:
        raise ValueError("没有成功加载任何schema文件")

    # Load the training data.
    train_data_path = os.path.join(base_path, 'train_cypher.json')
    try:
        with open(train_data_path, 'r', encoding='utf-8') as f:
            train_data = json.load(f)
    except FileNotFoundError:
        raise FileNotFoundError(f"找不到训练数据文件: {train_data_path}")

    # Initialize the model and tokenizer.
    tokenizer = T5Tokenizer.from_pretrained(
        config.model_name,
        cache_dir=config.cache_dir,
        legacy=False
    )

    model = T5ForConditionalGeneration.from_pretrained(
        config.model_name,
        cache_dir=config.cache_dir
    )

    # Build the dataset.
    train_dataset = CypherDataset(
        train_data,
        schemas,
        tokenizer,
        config.max_length
    )

    # Training arguments.
    training_args = TrainingArguments(
        output_dir=config.output_dir,
        num_train_epochs=config.num_train_epochs,
        per_device_train_batch_size=config.batch_size,
        learning_rate=config.learning_rate,
        warmup_steps=config.warmup_steps,
        weight_decay=0.01,
        save_steps=config.save_steps,
        save_total_limit=2,
        eval_steps=config.eval_steps  # no effect: no eval_dataset/eval_strategy is configured
    )

    # Create the trainer. Passing `model=` to the collator lets it prepare
    # decoder_input_ids from labels and pad labels consistently (the bare
    # DataCollatorForSeq2Seq(tokenizer) form cannot do this).
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=DataCollatorForSeq2Seq(tokenizer, model=model)
    )

    # Start training.
    trainer.train()

    # Persist the fine-tuned model and tokenizer.
    trainer.save_model("./cypher_model")
    tokenizer.save_pretrained("./cypher_model")

def generate_predictions():
    """Run the fine-tuned model on the test split and write answer_cypher.json.

    Mirrors the schema-loading logic of train(); loads ./cypher_model,
    generates one Cypher query per test item with beam search, and saves the
    predictions as a list of {"index", "answer"} records.
    """
    base_path = r"E:\ccfcom\bdci\schema"

    # 1. Load the fine-tuned model and tokenizer from disk.
    try:
        model = T5ForConditionalGeneration.from_pretrained("./cypher_model")
        tokenizer = T5Tokenizer.from_pretrained("./cypher_model")
        model.eval()  # inference mode (disables dropout etc.)
    except Exception as e:
        print(f"加载模型失败: {e}")
        return

    # 2. Load schema files, keyed by each file's 'db_id'.
    schemas = {}
    schema_files = [
        'YAGO.JSON', 'THE_THREE_BODY.JSON', 
        'MOVIE.JSON', 'FINBENCH.JSON'
    ]
    
    for file in schema_files:
        file_path = os.path.join(base_path, file)
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                schema = json.load(f)
                schemas[schema['db_id']] = schema['schema']
        except FileNotFoundError:
            print(f"警告: 找不到文件 {file_path}")
            continue
            
    # 3. Hard-coded fallback schemas (duplicated from train() — keep in sync).
    schemas['common'] = [
        {
            "label": "person",
            "type": "VERTEX",
            "properties": [
                {"name": "name", "type": "STRING"},
                {"name": "born", "type": "INT32", "optional": True},
                {"name": "belt", "type": "STRING", "optional": True},
                {"name": "age", "type": "INT32", "optional": True}
            ]
        },
        {
            "label": "movie",
            "type": "VERTEX",
            "properties": [
                {"name": "title", "type": "STRING"},
                {"name": "released", "type": "INT32", "optional": True}
            ]
        },
        {
            "label": "acted_in",
            "type": "EDGE",
            "properties": [
                {"name": "role", "type": "STRING", "optional": True}
            ]
        }
    ]
    schemas['movie'] = [
        {
            "label": "person",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "name", "type": "STRING"},
                {"name": "born", "type": "INT32", "optional": True},
                {"name": "poster_image", "type": "STRING", "optional": True}
            ]
        },
        {
            "label": "genre",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "name", "type": "STRING"}
            ]
        },
        {
            "label": "keyword",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "name", "type": "STRING"}
            ]
        },
        {
            "label": "movie",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "title", "type": "STRING", "index": True},
                {"name": "tagline", "type": "STRING"},
                {"name": "summary", "type": "STRING", "optional": True},
                {"name": "poster_image", "type": "STRING", "optional": True},
                {"name": "duration", "type": "INT32"},
                {"name": "rated", "type": "STRING", "optional": True}
            ]
        },
        {
            "label": "user",
            "type": "VERTEX",
            "primary": "id",
            "properties": [
                {"name": "id", "type": "INT32"},
                {"name": "login", "type": "STRING", "index": True}
            ]
        },
        {
            "label": "acted_in",
            "type": "EDGE",
            "properties": [
                {"name": "role", "type": "STRING"}
            ]
        },
        {
            "label": "rate",
            "type": "EDGE",
            "properties": [
                {"name": "stars", "type": "INT8"}
            ]
        },
        {"label": "directed", "type": "EDGE"},
        {"label": "is_friend", "type": "EDGE"},
        {"label": "has_genre", "type": "EDGE"},
        {"label": "has_keyword", "type": "EDGE"},
        {"label": "produce", "type": "EDGE"},
        {"label": "write", "type": "EDGE"}
    ]
    # NOTE(review): 'yoga' may be a typo for 'yago' — verify against test data db_ids.
    schemas['yoga'] = [
        {
            "label": "Person",
            "type": "VERTEX",
            "primary": "name",
            "properties": [
                {"name": "name", "type": "STRING"},
                {"name": "age", "type": "INT16", "optional": True},
                {"name": "birthyear", "type": "INT16", "optional": True},
                {"name": "gender", "type": "INT8", "optional": True},
                {"name": "phone", "type": "INT16", "unique": False, "index": True}
            ]
        },
        {
            "label": "City",
            "type": "VERTEX",
            "primary": "name",
            "properties": [
                {"name": "name", "type": "STRING"},
                {"name": "area", "type": "DOUBLE"},
                {"name": "population", "type": "DOUBLE"}
            ]
        },
        {
            "label": "Film",
            "type": "VERTEX",
            "primary": "title",
            "properties": [
                {"name": "title", "type": "STRING"}
            ]
        },
        {"label": "HAS_CHILD", "type": "EDGE"},
        {"label": "MARRIED", "type": "EDGE"},
        {
            "label": "BORN_IN",
            "type": "EDGE",
            "properties": [
                {"name": "weight", "type": "FLOAT", "optional": True}
            ]
        },
        {
            "label": "KNOWS",
            "type": "EDGE",
            "properties": [
                {"name": "weight", "type": "FLOAT", "optional": True}
            ]
        },
        {"label": "DIRECTED", "type": "EDGE"},
        {"label": "WROTE_MUSIC_FOR", "type": "EDGE"},
        {
            "label": "ACTED_IN",
            "type": "EDGE",
            "properties": [
                {"name": "charactername", "type": "STRING"}
            ]
        }
    ]
    # 4. Load the test data.
    test_data_path = os.path.join(base_path, 'test_cypher.json')
    try:
        with open(test_data_path, 'r', encoding='utf-8') as f:
            test_data = json.load(f)
    except FileNotFoundError:
        print(f"找不到测试数据文件: {test_data_path}")
        return

    # 5. Generate predictions (no gradients needed at inference time).
    predictions = []
    with torch.no_grad():
        for idx, item in enumerate(test_data):
            schema = schemas[item['db_id']]
            # NOTE(review): test items are read via 'question' while training
            # uses 'input' — presumably the two JSON files use different keys;
            # confirm against the actual data files.
            input_text = f"Schema: {schema}\nQuestion: {item['question']}"
            
            # Encode the input prompt.
            inputs = tokenizer(
                input_text,
                return_tensors="pt",
                max_length=512,
                truncation=True
            )

            # Generate the answer with beam search.
            outputs = model.generate(
                input_ids=inputs.input_ids,
                attention_mask=inputs.attention_mask,
                max_length=128,
                num_beams=5,
                early_stopping=True
            )

            # Decode the best beam into text.
            predicted_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
            
            # Collect the prediction record.
            predictions.append({
                "index": str(idx),
                "answer": predicted_text
            })

    # 6. Save predictions to the submission file.
    with open('answer_cypher.json', 'w', encoding='utf-8') as f:
        json.dump(predictions, f, ensure_ascii=False, indent=2)

    print("预测结果已保存到 answer_cypher.json")

if __name__ == "__main__":
    # Train first, then immediately run inference on the test split.
    train()
    generate_predictions()