import os
import sys
import torch
import nn.api as nn_api
import nn.simplest_gcn as simplest_gcn
import nn.transformer as transformer
import nn.lstm as lstm
from tasks.bvd.graph_class import *
from tasks.bvd.annotation_class import *
from tasks.bvd.graph_constructor import json_constructor
from tasks.bvd.annotation_parser import annotation_json_parser
import pickle
from .dataset_class import TransformerDataset, GCNDataset, LSTMDataset
import numpy as np
from data.api import iter_dataset, init_log
from torch_geometric.data import Data,Batch
import time
import builtins
import warnings

# mod_type = 'GCN'
# if mod_type == 'GCN':
#     from torch_geometric.loader import DataLoader # only gcn or graph mod
# else :
#     from torch.utils.data import DataLoader

# Macro-style configuration: module-level path globals consumed by the legacy runners below.
# GCN
# dataset_save_pkl = r'preprocessed_data/samples/cwe121_v1.2_o2.pkl'
# dataset_dir = r'preprocessed_data/CWE121_Stack_Based_Buffer_Overflow_O2'
# nn_file = r'mods/simplest_v1.2_o2.pt'
# pkl_savefile = r'preprocessed_data/cfg_graph_o2.pkl'
# log_dir = r'logs/run_log.txt'

# GCN newest
# dataset_save_pkl = r'preprocessed_data/samples/cwe121_v1.0_o0_newest.pkl'
# dataset_dir = r'preprocessed_data/CWE121_Stack_Based_Buffer_Overflow_O0_filtered_newest20241110'
# nn_file = r'mods/simplest_v1.0_newest.pt'
# pkl_savefile = r'preprocessed_data/cfg_graph_o0_newest.pkl'
# log_dir = r'logs/run_log_newest.txt'

# GCN For addon dataset : O0/O1/O2/O3 + default/deadcode
# Directory for cached dataset-class pickles ('<dataset>.pkl' is appended per sub-dataset).
dataset_save_pkl = r'preprocessed_data/addon_test/addon_dataset_pkl/'
# Root directory containing one sub-directory per sub-dataset of raw samples.
dataset_dir = r'preprocessed_data/addon_test/addon_juliet_dataset/'
# Directory for model checkpoints ('<dataset>.pt' is appended per sub-dataset).
nn_file = r'mods/addon_test/'
# Directory for cached constructed-graph pickles ('<dataset>.pkl' is appended per sub-dataset).
pkl_savefile = r'preprocessed_data/addon_test/addon_cfg_graph/'
# Directory for training logs ('<dataset>_log.txt' is appended per sub-dataset).
log_dir = r'logs/addon_test/'

# Transformer
# dataset_save_pkl = r'preprocessed_data/samples/cwe121_rearranged.pkl'
# dataset_dir = r'preprocessed_data/cfg_transferred/CWE121_Stack_Based_Buffer_Overflow'
# nn_file = r'mods/transformer_v1.0.pt'
# pkl_savefile = r'preprocessed_data/cfg_graph_rearranged.pkl'
# log_dir = r'logs/run_log.txt'

# LEGACY FUNCTIONS: driven by the module-level path globals declared above.
def run_multiple_dataset():
    """Train one model per sub-dataset found under ``dataset_dir``.

    For every sub-directory of the module-level ``dataset_dir`` that does not
    yet have a trained checkpoint ``<name>.pt`` under ``nn_file``, rewrite the
    module-level path globals to point at that sub-dataset and delegate to
    :func:`run_single_dataset` (which reads those globals).
    """
    global dataset_save_pkl, dataset_dir, nn_file, pkl_savefile, log_dir

    # BUGFIX: the loop below reassigns the globals, so without snapshotting
    # the base directories first, a second call to this function would build
    # paths from the stale, already-rewritten values.
    base_dataset_dir = dataset_dir
    base_dataset_save_pkl = dataset_save_pkl
    base_nn_file = nn_file
    base_pkl_savefile = pkl_savefile
    base_log_dir = log_dir

    # Hoisted loop-invariant: list the already-trained checkpoints once
    # instead of hitting the filesystem for every candidate dataset.
    trained = set(os.listdir(base_nn_file))
    dataset_ls = [d for d in os.listdir(base_dataset_dir)
                  if d + '.pt' not in trained]

    for dataset in dataset_ls:
        # The base paths all end with '/', so plain concatenation is correct.
        dataset_dir = base_dataset_dir + dataset
        dataset_save_pkl = base_dataset_save_pkl + dataset + '.pkl'
        nn_file = base_nn_file + dataset + '.pt'
        pkl_savefile = base_pkl_savefile + dataset + '.pkl'
        log_dir = base_log_dir + dataset + '_log.txt'

        run_single_dataset()

def run_single_dataset():
    """Train the GCN model on the dataset configured by the module globals.

    Reads ``dataset_dir``/``dataset_save_pkl``/``nn_file``/``pkl_savefile``/
    ``log_dir`` set at module level (and rewritten per-dataset by
    :func:`run_multiple_dataset`).  Constructed graphs are cached to
    ``pkl_savefile`` so subsequent runs skip the JSON parsing pass.
    """
    # BUGFIX: DataLoader was only imported by a commented-out block at the
    # top of the file, so this function raised NameError on first use.
    # Import locally, mirroring run_gcn_single_dataset.
    from torch_geometric.loader import DataLoader

    global dataset_save_pkl, dataset_dir, nn_file, pkl_savefile, log_dir
    init_log(log_dir)
    all_files = iter_dataset(dataset_dir)

    objects = []
    before_io = time.time()
    if os.path.exists(pkl_savefile):
        # Reuse the graph cache produced by a previous run.
        with open(pkl_savefile, 'rb') as f:
            objects = pickle.load(f)
    else:
        for files, label in all_files:
            cfg = files[1]  # the CFG json for this sample
            factory = json_constructor(cfg, 'cfg')
            objects.append((factory.construct_by_inst(), label))
        with open(pkl_savefile, 'wb') as f:
            pickle.dump(objects, f)
    after_io = time.time()
    # One full JSON-construction pass takes ~100 s on this dataset.
    print('load json total time (seconds):', after_io - before_io)

    dataset = GCNDataset(objects, dataset_save_pkl)
    # 80/20 train/test split (unseeded here; the API runners accept a seed).
    train_size = int(len(dataset) * 0.8)
    train_dataset, test_dataset = torch.utils.data.random_split(
        dataset, [train_size, len(dataset) - train_size])

    train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)
    model = simplest_gcn.GCN(nfeat=128, nhid=256, nclass=2)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if os.path.exists(nn_file):
        # Resume from an existing checkpoint.
        model.load_state_dict(torch.load(nn_file, map_location=device))
    nn_api.train_model(model, train_loader, test_loader, device=device,
                       lr=1e-4, epoch_number=3000, out_file=nn_file)


# FUNCTIONS FOR API: fully parameterized runners (no reliance on module globals).
def run_gcn_single_dataset(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, log_file, seed, annotation_dir=None, annotation_usage='repeat'):
    """Train the GCN model on one dataset.

    Args:
        dataset_dir: root directory of the raw sample files.
        dataset_class_save_file: path where the GCNDataset object is cached.
        graph_save_file: optional pickle cache for constructed graphs; pass
            None to always rebuild (and skip writing the cache).
        model_dir: checkpoint path to resume from and write to.
        log_file: training-log destination.
        seed: optional int for a reproducible 80/20 train/test split.
        annotation_dir: optional annotation directory; when given, training
            runs sample-by-sample (batch_size=1).
        annotation_usage: how annotations are applied (default 'repeat').
    """
    from torch_geometric.loader import DataLoader
    init_log(log_file)
    all_files = iter_dataset(dataset_dir)

    before_io = time.time()
    if graph_save_file is not None and os.path.exists(graph_save_file):
        # Reuse the graph cache produced by a previous run.
        with open(graph_save_file, 'rb') as f:
            objects = pickle.load(f)
    else:
        objects = []
        for files, label in all_files:
            cfg = files[1]  # the CFG json for this sample
            factory = json_constructor(cfg, 'cfg')
            objects.append((factory.construct_by_inst(), label))
        # BUGFIX: the load branch tolerates graph_save_file=None, but the
        # save branch used to call open(None, 'wb') and crash; only write
        # the cache when a path was actually supplied.
        if graph_save_file is not None:
            with open(graph_save_file, 'wb') as f:
                pickle.dump(objects, f)
    after_io = time.time()
    # One full JSON-construction pass takes ~100 s on this dataset.
    print('load json total time (seconds):', after_io - before_io)

    if annotation_dir is None:
        dataset = GCNDataset(objects, dataset_class_save_file)
    else:
        dataset = GCNDataset(objects, dataset_class_save_file, annotation_dir, annotation_usage)

    train_size = int(len(dataset) * 0.8)
    split_sizes = [train_size, len(dataset) - train_size]
    if seed is not None:
        train_dataset, test_dataset = torch.utils.data.random_split(dataset, split_sizes, generator=torch.Generator().manual_seed(seed))
    else:
        train_dataset, test_dataset = torch.utils.data.random_split(dataset, split_sizes)

    if annotation_dir is None:
        train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
        test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)
        model = simplest_gcn.GCN(nfeat=128, nhid=256, nclass=2, batch_size=128)
    else:
        # Annotation usage reads annotations one sample at a time, so batched
        # processing has not been implemented yet; fall back to batch_size=1.
        train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False)
        test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
        model = simplest_gcn.GCN(nfeat=128, nhid=256, nclass=2, batch_size=1, annotation_dir=annotation_dir, annotation_usage=annotation_usage)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if os.path.exists(model_dir):
        # Resume from an existing checkpoint.
        model.load_state_dict(torch.load(model_dir, map_location=device))
    nn_api.train_model(model, train_loader, test_loader, device=device, lr=1e-4, epoch_number=3000, out_file=model_dir)

def run_transformer_single_dataset(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, log_file, seed, annotation_dir=None):
    """Train the graph-transformer model on one dataset.

    Parameters mirror :func:`run_gcn_single_dataset`; ``seed`` (or None)
    controls reproducibility of the 80/20 train/test split, and
    ``graph_save_file`` may be None to skip the graph cache entirely.
    """
    from torch.utils.data import DataLoader
    init_log(log_file)
    all_files = iter_dataset(dataset_dir)

    before_io = time.time()
    # CONSISTENCY FIX: tolerate graph_save_file=None exactly like
    # run_gcn_single_dataset (os.path.exists(None) raises TypeError), and
    # only write the cache when a path was supplied.
    if graph_save_file is not None and os.path.exists(graph_save_file):
        with open(graph_save_file, 'rb') as f:
            objects = pickle.load(f)
    else:
        objects = []
        for files, label in all_files:
            cfg = files[1]  # the CFG json for this sample
            factory = json_constructor(cfg, 'cfg')
            objects.append((factory.construct_by_inst(), label))
        if graph_save_file is not None:
            with open(graph_save_file, 'wb') as f:
                pickle.dump(objects, f)
    after_io = time.time()
    # One full JSON-construction pass takes ~100 s on this dataset.
    print('load json total time (seconds):', after_io - before_io)

    if annotation_dir is None:
        dataset = TransformerDataset(objects, dataset_class_save_file)
    else:
        dataset = TransformerDataset(objects, dataset_class_save_file, annotation_dir)

    train_size = int(len(dataset) * 0.8)
    split_sizes = [train_size, len(dataset) - train_size]
    if seed is not None:
        train_dataset, test_dataset = torch.utils.data.random_split(dataset, split_sizes, generator=torch.Generator().manual_seed(seed))
    else:
        train_dataset, test_dataset = torch.utils.data.random_split(dataset, split_sizes)

    def collate_fn(batch):
        # Stack per-sample features into a dense (B, 1000, 128) tensor.
        # NOTE(review): assumes every data.x is exactly (1000, 128) — confirm
        # against TransformerDataset.
        batch_data = Batch()
        batch_data.y = torch.tensor([data.y for data in batch])
        batch_data.x = torch.zeros((len(batch), 1000, 128), dtype=torch.float)
        for i, data in enumerate(batch):
            batch_data.x[i] = data.x
        return batch_data

    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, collate_fn=collate_fn)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False, collate_fn=collate_fn)
    # 'GrpahTransformer' is the (misspelled) class name exported by nn.transformer.
    model = transformer.GrpahTransformer(nfeat=128, nhead=8, nhid=256, nclass=2)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if os.path.exists(model_dir):
        # Resume from an existing checkpoint.
        model.load_state_dict(torch.load(model_dir, map_location=device))
    nn_api.train_model(model, train_loader, test_loader, device=device, lr=1e-4, epoch_number=3000, out_file=model_dir)

def run_lstm_single_dataset(dataset_dir, dataset_class_save_file, graph_save_file, model_dir, log_file, seed, annotation_dir=None):
    """Train the LSTM model on one dataset.

    Parameters mirror :func:`run_gcn_single_dataset`.

    NOTE(review): ``annotation_dir`` is accepted for signature parity with
    the other runners but is currently unused by the LSTM pipeline — confirm
    whether annotation support is intended here.
    """
    from torch.utils.data import DataLoader
    init_log(log_file)
    all_files = iter_dataset(dataset_dir)

    before_io = time.time()
    # CONSISTENCY FIX: tolerate graph_save_file=None exactly like
    # run_gcn_single_dataset (os.path.exists(None) raises TypeError), and
    # only write the cache when a path was supplied.
    if graph_save_file is not None and os.path.exists(graph_save_file):
        with open(graph_save_file, 'rb') as f:
            objects = pickle.load(f)
    else:
        objects = []
        for files, label in all_files:
            cfg = files[1]  # the CFG json for this sample
            factory = json_constructor(cfg, 'cfg')
            objects.append((factory.construct_by_inst(), label))
        if graph_save_file is not None:
            with open(graph_save_file, 'wb') as f:
                pickle.dump(objects, f)
    after_io = time.time()
    print('load json total time (seconds):', after_io - before_io)

    dataset = LSTMDataset(objects, dataset_class_save_file)

    train_size = int(len(dataset) * 0.8)
    split_sizes = [train_size, len(dataset) - train_size]
    if seed is not None:
        train_dataset, test_dataset = torch.utils.data.random_split(dataset, split_sizes, generator=torch.Generator().manual_seed(seed))
    else:
        train_dataset, test_dataset = torch.utils.data.random_split(dataset, split_sizes)

    def collate_fn(batch):
        # Dense-pack features plus per-sample instruction counts.
        # NOTE(review): assumes every data.x is exactly (1000, 128) — confirm
        # against LSTMDataset.
        batch_data = Batch()
        batch_data.y = torch.tensor([data.y for data in batch])
        batch_data.x = torch.zeros((len(batch), 1000, 128), dtype=torch.float)
        batch_data.asm_count = torch.tensor([data.asm_count for data in batch])
        for i, data in enumerate(batch):
            batch_data.x[i] = data.x
        return batch_data

    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, collate_fn=collate_fn)

    model = lstm.LSTM(nfeat=128, nhid=512, nclass=2, batch_size=1)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if os.path.exists(model_dir):
        # Resume from an existing checkpoint.
        model.load_state_dict(torch.load(model_dir, map_location=device))
    # Note: the LSTM runner uses lr=1e-3 (the other runners use 1e-4).
    nn_api.train_model(model, train_loader, test_loader, device=device, lr=1e-3, epoch_number=3000, out_file=model_dir)


if __name__ == '__main__':
    # Script entry point: train on the single dataset configured by the
    # module-level path constants above.
    run_single_dataset()