import torch
import torch.nn as nn
import math
import os
from torch_geometric.nn import global_mean_pool
import pickle
# LSTM model for vulnerability detection
class LSTM(nn.Module):
    """Bidirectional multi-layer LSTM classifier for vulnerability detection.

    Each sample is a (padded) sequence of `nfeat`-dimensional instruction
    embeddings; `data.asm_count` holds the true (unpadded) length. The final
    forward hidden state and the backward hidden state at step 0 are
    concatenated and passed to a small MLP head producing `nclass` logits.
    """

    def __init__(self, nfeat, nhid, nclass=2, batch_size=1, annotation_dir=None, annotation_usage=None, device=None, dropout=0.9):
        """
        Args:
            nfeat: dimensionality of each instruction embedding.
            nhid: hidden size per LSTM direction.
            nclass: number of output classes (default 2: vulnerable / benign).
            batch_size: stored for reference; `forward` handles any batch size.
            annotation_dir: stored but unused here — presumably consumed by
                callers; verify before removing.
            annotation_usage: stored but unused here (same note as above).
            device: torch device; defaults to cuda:1 when CUDA is available.
            dropout: dropout between stacked LSTM layers.
        """
        super(LSTM, self).__init__()
        self.dropout = dropout
        self.batch_size = batch_size
        self.nhid = nhid
        self.annotation_dir = annotation_dir
        self.annotation_usage = annotation_usage

        if device is None:
            self.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device

        # Bidirectional, 4-layer LSTM; dropout applies between stacked layers.
        self.lstm = nn.LSTM(
            input_size=nfeat,
            hidden_size=nhid,
            num_layers=4,
            batch_first=True,
            bidirectional=True,
            dropout=dropout
        )

        # Classification head; input is nhid * 2 because the LSTM is bidirectional.
        self.classifier = nn.Sequential(
            nn.Linear(nhid * 2, 128),
            nn.ReLU(),
            nn.Linear(128, nclass)
        )

        self.to(self.device)

    def forward_old(self, data):
        """Single-sample forward pass (assumes batch_size == 1).

        Args:
            data: object with `x` of shape [1, max_seq_len, nfeat] (or
                [max_seq_len, nfeat]) and a scalar `asm_count` true length.

        Returns:
            Tensor of shape [1, nclass] with classification logits.
        """
        data = data.to(self.device)
        seq_len = data.asm_count
        # Drop padding beyond the true instruction count.
        x = data.x[:, :seq_len, :]

        # Ensure a batch dimension: [1, seq_len, nfeat].
        if len(x.shape) == 2:
            x = x.unsqueeze(0)

        lstm_out, (hidden, cell) = self.lstm(x)

        # Bidirectional final state: forward output at the last valid step
        # concatenated with the backward output at step 0.
        last_hidden = torch.cat((lstm_out[:, data.asm_count.item() - 1, :self.nhid],
                                 lstm_out[:, 0, self.nhid:]), dim=1)
        return self.classifier(last_hidden)

    # New forward function — supports batch_size > 1.
    def forward(self, data):
        """Batched forward pass; each sample is run through the LSTM
        independently, truncated to its true length.

        Args:
            data: batch with `x` of shape [B, max_seq_len, nfeat] and
                `asm_count` of shape [B] holding true sequence lengths.

        Returns:
            Tensor of shape [B, nclass] with classification logits.
        """
        data = data.to(self.device)

        outs = []
        for idx in range(data.x.shape[0]):
            seq_len = int(data.asm_count[idx].item())
            # Truncate to the true length so the backward direction does not
            # process padding (matches forward_old's behavior). Slicing
            # idx:idx+1 keeps the batch dimension, so no unsqueeze is needed.
            x = data.x[idx:idx + 1, :seq_len, :]

            lstm_out, (hidden, cell) = self.lstm(x)

            # Forward direction: last valid step; backward direction: step 0.
            last_hidden = torch.cat((lstm_out[:, seq_len - 1, :self.nhid],
                                     lstm_out[:, 0, self.nhid:]), dim=1)
            outs.append(self.classifier(last_hidden))

        if not outs:
            # Empty batch: return an empty [0, nclass] logits tensor.
            return torch.zeros((0, self.classifier[-1].out_features),
                               dtype=torch.float, device=self.device)
        return torch.cat(outs, dim=0)


    
class LSTMVisual(nn.Module):
    """Wrapper around a trained `LSTM` that, during each forward pass, dumps
    per-prefix classification scores (one entry per instruction address) to a
    pickle file for later visualization, then returns the full-sequence
    prediction.
    """

    def __init__(self, base_mod, vis_info_dir, device=None, dropout=0.5):
        """
        Args:
            base_mod: path to a saved `LSTM` state_dict (nfeat=128, nhid=512).
            vis_info_dir: directory where per-sample score pickles are written.
            device: torch device; defaults to cuda:0 when CUDA is available.
            dropout: stored but unused by this wrapper.
        """
        super(LSTMVisual, self).__init__()
        self.base_mod = base_mod
        self.vis_info_dir = vis_info_dir
        self.dropout = dropout

        if device is None:
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device

        if os.path.exists(self.base_mod):
            # Load a pretrained base model and reuse its LSTM + classifier.
            mod = LSTM(nfeat=128, nhid=512, nclass=2)
            mod.load_state_dict(torch.load(self.base_mod, map_location=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")))
            self.lstm = mod.lstm
            self.classifier = mod.classifier
            self.nhid = mod.nhid
        else:
            print('ERROR: LSTM File Can Not Be Loaded!')
            exit(-1)
        self.to(self.device)

    def forward(self, data):
        """Score every prefix of the sample, persist the scores, and return
        the full-sequence logits.

        Args:
            data: single-sample batch with `x` [1, max_seq_len, nfeat], scalar
                `asm_count`, `file_name` (list with one path), and `addr_ls`
                (list with one per-instruction address list).

        Returns:
            Tensor of shape [1, 2] with classification logits.
        """
        data = data.to(self.device)
        # Build the pickle name from the last 5 path components so samples
        # from different subdirectories do not collide.
        file_name = data.file_name[0].split('/')[-5:]
        target_file_name = '@'.join(file_name).replace('.json', '.pkl')
        target_file_route = os.path.join(self.vis_info_dir, target_file_name)

        addr_ls = data.addr_ls[0]
        seq_len = data.asm_count.item()
        x = data.x[:, :seq_len, :]
        if len(x.shape) == 2:
            x = x.unsqueeze(0)
        lstm_out, (hidden, cell) = self.lstm(x)

        # Score every prefix [0..t], paired with the address of its last
        # instruction; entries are collected from the end backwards.
        result = []
        while seq_len > 0:
            last_hidden = lstm_out[:, seq_len - 1, :]
            out = self.classifier(last_hidden)
            result.append([out, addr_ls[seq_len - 1]])
            seq_len -= 1

        # Merge into (or create) the per-sample visualization pickle.
        if os.path.exists(target_file_route):
            with open(target_file_route, 'rb') as f:
                pkl_dict = pickle.load(f)
        else:
            pkl_dict = {}
        pkl_dict[target_file_name] = result
        with open(target_file_route, 'wb') as f:
            pickle.dump(pkl_dict, f)

        # Final prediction: forward output at the last valid step concatenated
        # with the backward output at step 0 (bidirectional LSTM).
        last_hidden = torch.cat((lstm_out[:, data.asm_count.item() - 1, :self.nhid],
                                 lstm_out[:, 0, self.nhid:]), dim=1)
        return self.classifier(last_hidden)

        