import torch as t
import torch
from torch_geometric.data import Dataset
from torch_geometric.data import Batch
from torch_geometric.loader import DataLoader  # 注意是 PyG 的 DataLoader
from torch_geometric.nn.norm import LayerNorm
from torch_geometric.nn.conv import TransformerConv
from torch.nn import Dropout, MaxPool1d, Linear
import torch.nn.functional as F
import os
from pathlib import Path
from torch.utils.data import Subset, random_split, ConcatDataset
from torch_geometric.utils import dropout_adj
import sys
from torch_geometric.nn import global_mean_pool
import logging
from datetime import datetime

# Logging configuration
def setup_logger():
    """Configure the root logger to write to a timestamped file and to stdout.

    Returns the root logger instance.
    """
    # Make sure the log directory exists before attaching a file handler.
    log_dir = Path('/home/user1/code')
    log_dir.mkdir(exist_ok=True)

    # One log file per run, named with a timestamp.
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    log_path = log_dir / f'training_{stamp}.log'

    handlers = [
        logging.FileHandler(log_path),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(message)s',
        handlers=handlers,
    )
    return logging.getLogger()

# Initialize logging before the main training code runs
logger = setup_logger()  # module-level logger used by the dataset classes and training loop below

# 1. Dataset loading
class GraphDataset(Dataset):
    """PyG dataset over pre-saved graph files (``*.pt``) with a fixed class label.

    Files are filtered once at construction time: only graphs with more than
    one node and an ``id`` attribute are considered valid and indexable.

    Args:
        root: directory containing the ``.pt`` graph files.
        label: integer class label attached to every graph as ``data.y``.
    """

    def __init__(self, root, label):
        super(GraphDataset, self).__init__()
        self.root = root
        self.label = label
        self.files = list(Path(root).glob("*.pt"))
        # Pre-filter: keep only non-trivial graphs that carry an 'id' attribute.
        self.valid_files = []
        for file in self.files:
            data = torch.load(file)
            if data.num_nodes > 1 and hasattr(data, 'id'):
                self.valid_files.append(file)
        logger.info(f"目录 {root} 中总文件数: {len(self.files)}, 有效文件数: {len(self.valid_files)}")

    def len(self):
        # BUG FIX: get() indexes self.valid_files, so the length reported to
        # callers (and to Subset/range construction) must be the number of
        # valid files. Returning len(self.files) allowed out-of-range indices
        # whenever some files were filtered out, raising IndexError in get().
        return len(self.valid_files)

    def get(self, idx):
        # Load the graph lazily and attach the class label as a long tensor.
        data = torch.load(self.valid_files[idx])
        data.y = torch.tensor([self.label], dtype=torch.long)
        return data

    def __getitem__(self, idx):
        return self.get(idx)
        
def collate_fn(data_list):
    """Collate graph objects (possibly nested one level deep) into a PyG Batch."""
    # If the loader handed us lists of graphs, flatten one nesting level first.
    flat = data_list
    if isinstance(flat[0], list):
        flat = [graph for group in flat for graph in group]
    return Batch.from_data_list(flat)


num_samples = 2900  # number of samples taken from each class
# Label convention: 0 = goodpt (benign), 1 = malpt (malicious).
goodpt_dataset = GraphDataset('/home/user1/dataset/goodpt', label=0)
malpt_dataset = GraphDataset('/home/user1/dataset/malpt', label=1)

# Build class-balanced subsets, capped at num_samples per class.
goodpt_subset = Subset(goodpt_dataset, range(min(num_samples, len(goodpt_dataset))))
malpt_subset = Subset(malpt_dataset, range(min(num_samples, len(malpt_dataset))))
full_dataset = ConcatDataset([goodpt_subset, malpt_subset])

# Random 80/20 train/test split over the combined dataset.
train_size = int(0.8 * len(full_dataset))
test_size = len(full_dataset) - train_size
train_dataset, test_dataset = random_split(full_dataset, [train_size, test_size])

# DataLoaders use the custom collate_fn so graphs are merged into PyG Batches.
train_loader = DataLoader(
    train_dataset, 
    batch_size=32, 
    shuffle=True,
    collate_fn=collate_fn
)

test_loader = DataLoader(
    test_dataset, 
    batch_size=32, 
    shuffle=False,
    collate_fn=collate_fn
)


# Model hyperparameters shared by the CGMega class below.
HIDDEN_DIM = 32    # width of the hidden classifier layer
LEAKY_SLOPE = 0.2  # negative slope for LeakyReLU activations

class CGMega(t.nn.Module):
    """Two-layer TransformerConv graph classifier with an optional input residual.

    ``forward`` returns per-graph sigmoid probabilities of shape ``[batch_size]``.

    Args:
        in_channels: node feature dimension.
        hidden_channels: per-head hidden dimension of both conv layers.
        heads: number of attention heads.
        drop_rate: dropout rate in the classifier head.
        attn_drop_rate: attention dropout inside the TransformerConv layers.
        residual: if True, concatenate the raw input to the first conv output.
        devices_available: torch device the submodules are moved to.
    """

    def __init__(self, in_channels, hidden_channels, heads, drop_rate, attn_drop_rate, residual, devices_available):
        super(CGMega, self).__init__()
        self.devices_available = devices_available
        self.drop_rate = drop_rate
        self.convs = t.nn.ModuleList()
        self.residual = residual
        # With the residual on, layer 2 sees [conv1 output | raw input].
        mid_channels = in_channels + hidden_channels if residual else hidden_channels

        # Layer 1 averages over heads (concat=False) -> hidden_channels wide.
        self.convs.append(TransformerConv(in_channels, hidden_channels, heads=heads, dropout=attn_drop_rate, 
                                          concat=False, beta=True).to(self.devices_available))
        # Layer 2 concatenates heads -> hidden_channels * heads wide.
        self.convs.append(TransformerConv(mid_channels, hidden_channels, heads=heads,
                                          dropout=attn_drop_rate, concat=True, beta=True).to(self.devices_available))

        self.ln1 = LayerNorm(in_channels=mid_channels).to(self.devices_available)
        self.ln2 = LayerNorm(in_channels=hidden_channels *
                             heads).to(self.devices_available)
        # NOTE(review): self.pool is constructed but never used in forward() —
        # kept for state-dict/interface compatibility; confirm whether it was
        # meant to replace global_mean_pool.
        self.pool = MaxPool1d(2, 2)
        self.dropout = Dropout(drop_rate)
        self.lins = t.nn.ModuleList()
        self.lins.append(Linear(hidden_channels * heads, HIDDEN_DIM).to(devices_available))
        self.lins.append(Linear(HIDDEN_DIM, 1).to(devices_available))
        # Kaiming-uniform init for the classifier head; zero biases.
        for lin in self.lins:
            t.nn.init.kaiming_uniform_(lin.weight)
            if lin.bias is not None:
                t.nn.init.zeros_(lin.bias)

    def forward(self, data):
        data = data.to(self.devices_available)
        x = data.x
        res = x  # keep raw input for the residual concat below
        x = self.convs[0](x, data.edge_index)
        x = F.leaky_relu(x, negative_slope=LEAKY_SLOPE, inplace=True)
        x = t.cat((x, res), dim=1) if self.residual else x
        x = self.ln1(x)
        x = self.convs[1](x.to(self.devices_available), data.edge_index.to(self.devices_available))
        x = self.ln2(x)
        x = F.leaky_relu(x, negative_slope=LEAKY_SLOPE)

        # Global mean pooling: aggregate node features into one vector per graph.
        x = global_mean_pool(x, data.batch)  # [batch_size, hidden_channels * heads]

        x = self.lins[0](x).relu()  # classifier head
        x = self.dropout(x)
        x = self.lins[1](x)

        # BUG FIX: squeeze only the trailing feature dim. A bare .squeeze()
        # collapses a size-1 batch to a 0-dim scalar, which breaks BCELoss
        # target matching and the accuracy bookkeeping in train()/test().
        return t.sigmoid(x).squeeze(-1)

# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = CGMega(in_channels=768, hidden_channels=64, heads=8, drop_rate=0.5, attn_drop_rate=0.5, residual=True, devices_available=device).to(device)

# Main training configuration
# 5. Training and evaluation setup
epochs = 50
logger.info(f"Starting training with {epochs} epochs")
logger.info(f"Model architecture:\n{model}")
logger.info(f"训练集大小: {train_size}")
logger.info(f"测试集大小: {test_size}")
# Optimizer and loss: Adam + BCE (the model already outputs sigmoid probabilities).
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = torch.nn.BCELoss()

def train(model, train_loader, optimizer, criterion, device):
    """Run one training epoch.

    Returns:
        (mean loss per graph, accuracy) over the whole loader.
    """
    model.train()
    loss_sum = 0.0
    n_correct = 0
    n_graphs = 0

    for graphs in train_loader:
        graphs = graphs.to(device)
        labels = graphs.y.float()
        batch_size = graphs.num_graphs

        optimizer.zero_grad()
        probs = model(graphs)
        batch_loss = criterion(probs, labels.squeeze())
        batch_loss.backward()
        optimizer.step()

        # Accumulate per-graph loss and prediction statistics.
        loss_sum += batch_loss.item() * batch_size
        predictions = (probs > 0.5).float()
        n_correct += (predictions == labels).sum().item()
        n_graphs += batch_size

    return loss_sum / n_graphs, n_correct / n_graphs

def test(model, test_loader, criterion, device):
    model.eval()
    total_loss = 0
    correct = 0
    total = 0
    
    with torch.no_grad():
        for batch in test_loader:
            batch = batch.to(device)
            out = model(batch)
            loss = criterion(out, batch.y.float())
            
            total_loss += loss.item() * batch.num_graphs
            pred = (out > 0.5).float()
            correct += (pred == batch.y.float()).sum().item()
            total += batch.num_graphs
    
    return total_loss / total, correct / total

# 训练循环
best_acc = 0
for epoch in range(epochs):
    train_loss, train_acc = train(model, train_loader, optimizer, criterion, device)
    test_loss, test_acc = test(model, test_loader, criterion, device)
    
    # 记录训练过程
    logger.info(f'Epoch {epoch+1}/{epochs}:')
    logger.info(f'Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.4f}')
    logger.info(f'Test Loss: {test_loss:.4f}, Test Acc: {test_acc:.4f}')
    
    # 保存最佳模型
    if test_acc > best_acc:
        best_acc = test_acc
        torch.save(model.state_dict(), 'best_model.pt')
        logger.info(f'保存新的最佳模型，准确率: {best_acc:.4f}')

logger.info(f'训练完成！最佳测试准确率: {best_acc:.4f}')