import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from model import CodeTransformer
import tiktoken
import argparse
import math
import time
import numpy as np
from torch.cuda.amp import autocast, GradScaler

class CodeDataset(Dataset):
    """Next-token-prediction dataset over a GPT-2-tokenized text file.

    The token stream is split into non-overlapping windows of
    ``seq_length`` tokens; each item is an ``(x, y)`` pair where ``y``
    is ``x`` shifted one position to the right.
    """

    def __init__(self, file_path, seq_length=1024):
        with open(file_path, 'r') as f:
            text = f.read()
        enc = tiktoken.get_encoding("gpt2")
        self.tokens = enc.encode(text)
        self.seq_length = seq_length

    def __len__(self):
        # Each item consumes seq_length input tokens PLUS one extra token
        # for the final target, so reserve one token here. The previous
        # len(tokens) // seq_length let the last item's target slice run
        # off the end when len(tokens) was an exact multiple of
        # seq_length, producing a y one element shorter than x.
        return (len(self.tokens) - 1) // self.seq_length

    def __getitem__(self, idx):
        start = idx * self.seq_length
        end = start + self.seq_length + 1
        x = torch.tensor(self.tokens[start:end - 1], dtype=torch.long)
        y = torch.tensor(self.tokens[start + 1:end], dtype=torch.long)
        return x, y

def train_epoch(model, loader, optimizer, device, scaler, grad_clip=1.0):
    """Run one mixed-precision training epoch with gradient clipping.

    Args:
        model: network mapping a LongTensor of token ids to per-position logits.
        loader: iterable of (input, target) LongTensor batch pairs.
        optimizer: optimizer updating ``model``'s parameters.
        device: device the batches are moved to.
        scaler: ``torch.cuda.amp.GradScaler`` handling AMP loss scaling.
        grad_clip: maximum gradient norm, applied after unscaling.

    Returns:
        Mean loss over all batches (float).
    """
    model.train()
    losses = []
    for batch, (x, y) in enumerate(loader):
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()

        with autocast():
            outputs = model(x)
            loss = F.cross_entropy(outputs.view(-1, outputs.size(-1)), y.view(-1))

        scaler.scale(loss).backward()
        # Gradients are still multiplied by the loss scale at this point;
        # unscale them first, otherwise clip_grad_norm_ clips against the
        # scaled magnitudes and grad_clip is effectively meaningless.
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
        scaler.step(optimizer)
        scaler.update()

        losses.append(loss.item())
        if batch % 100 == 0:
            print(f"Batch {batch} | Loss: {loss.item():.4f}")

    return np.mean(losses)

def main():
    """CLI entry point: train the model and checkpoint the best epoch."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--data", type=str, default="code_data.txt")
    arg_parser.add_argument("--batch_size", type=int, default=32)
    arg_parser.add_argument("--epochs", type=int, default=10)
    arg_parser.add_argument("--lr", type=float, default=3e-4)
    arg_parser.add_argument("--seq_length", type=int, default=1024)
    args = arg_parser.parse_args()

    # Data pipeline.
    dataset = CodeDataset(args.data, args.seq_length)
    train_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    # Model, optimizer and AMP loss scaler.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = CodeTransformer().to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    scaler = GradScaler()

    best_loss = float('inf')
    for epoch in range(args.epochs):
        tic = time.time()
        train_loss = train_epoch(model, train_loader, optimizer, device, scaler)
        elapsed = time.time() - tic

        print(f"Epoch {epoch+1} | Loss: {train_loss:.4f} | Time: {elapsed:.2f}s")

        # Checkpoint whenever the training loss improves on the best so far.
        if train_loss < best_loss:
            best_loss = train_loss
            torch.save(model.state_dict(), "best_model.pth")
            print("Saved best model")

if __name__ == "__main__":
    main()

class CodeDataset(Dataset):
    """Next-token-prediction dataset over a GPT-2-tokenized text file.

    NOTE(review): this redefinition shadows the ``CodeDataset`` declared
    earlier in this file; the two copies should be consolidated.
    """

    def __init__(self, file_path, max_length=1024):
        self.enc = tiktoken.get_encoding("gpt2")
        with open(file_path, 'r') as f:
            self.data = f.read()

        tokens = self.enc.encode(self.data)
        self.examples = self._make_examples(tokens, max_length)

    @staticmethod
    def _make_examples(tokens, max_length):
        # Keep only complete windows. The previous version kept a ragged
        # trailing chunk, which the DataLoader's default collate cannot
        # stack into a batch (and a 1-token final chunk would yield empty
        # x/y tensors in __getitem__).
        return [tokens[i:i + max_length]
                for i in range(0, len(tokens) - max_length + 1, max_length)]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        x = torch.tensor(self.examples[idx][:-1], dtype=torch.long)
        y = torch.tensor(self.examples[idx][1:], dtype=torch.long)
        return x, y

def train():
    """Standalone training entry point: parse CLI args, train, save weights.

    NOTE(review): this is a second entry point alongside ``main()`` above,
    and both ``__main__`` guards fire when the script is run directly; the
    two training paths should be consolidated.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=4)
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--learning_rate", type=float, default=3e-4)
    parser.add_argument("--data_file", default="train_code.txt")
    parser.add_argument("--save_path", default="model.pth")
    args = parser.parse_args()

    # Initialize model and optimizer; use the GPU when one is available
    # (previously everything silently stayed on CPU even on CUDA machines).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = CodeTransformer().to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)

    # Prepare the dataset.
    dataset = CodeDataset(args.data_file)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    # Training loop.
    model.train()
    for epoch in range(args.epochs):
        total_loss = 0
        for batch_idx, (x, y) in enumerate(dataloader):
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()

            logits = model(x)
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), y.view(-1))

            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            if batch_idx % 10 == 0:
                print(f"Epoch {epoch+1} | Batch {batch_idx} | Loss: {loss.item():.4f}")

        # Guard against an empty dataset so we don't divide by zero.
        avg_loss = total_loss / max(len(dataloader), 1)
        print(f"Epoch {epoch+1} | Average Loss: {avg_loss:.4f}")

    # Save the trained weights.
    torch.save(model.state_dict(), args.save_path)
    print(f"Model saved to {args.save_path}")

if __name__ == "__main__":
    train()