#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time    : 2022/3/20 1:28 PM
# @Author  : WangZhixing
import argparse
import os
import shutil
import sys

from ProcessData.Process import SymbolVector, FileVector

curPath = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)

import argparse
from sklearn.cluster import KMeans

from Metric import Metric
from Output.output_mehod.result2rsf_file import result2rsf_file
from ProcessData import DependenceGraph,FileVectorDependenceGraph
from Utils import ConfigFile
from Model.Module.Gcnconv1 import Gcnconv1
from Model.Module.Gcnconv2 import Gcnconv2
from Model.Module.Gatconv1 import Gatconv1
from Model.Module.Gatconv2 import Gatconv2
import torch
from Model.Module.GraphSageconv1 import GraphSageconv1
from Model.Module.GraphSageconv2 import GraphSageconv2


def train(model, loader, optimizer, data, device):
    model.train()
    total_loss = 0
    for pos_rw, neg_rw in loader:
        optimizer.zero_grad()
        out = model(data.x, data.edge_index, data.edge_attr)
        loss = model.loss(out, pos_rw.to(device), neg_rw.to(device))
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    return total_loss / len(loader)


def _fit_and_cluster(model, data, kwarg, lr):
    """Train *model*, embed every node, and K-Means-cluster the embeddings.

    Shared tail of all `NodeGCN` branches: build the model's walk loader,
    run `kwarg['train_epoch'] - 1` training epochs, embed the graph once
    under `no_grad`, and cluster the embeddings into `kwarg['cluster']`
    groups.

    Returns:
        tuple: (embeddings tensor, ndarray of cluster labels per node).
    """
    loader = model.loader(batch_size=128, shuffle=False)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-4)
    # NOTE(review): range(1, N) runs N-1 epochs; kept as-is to preserve the
    # original training schedule — confirm whether N epochs were intended.
    for _epoch in range(1, kwarg['train_epoch']):
        train(model, loader, optimizer, data, kwarg['device'])
    with torch.no_grad():
        z = model(data.x, data.edge_index, data.edge_attr)
    # Move embeddings to host memory before handing them to scikit-learn;
    # fitting directly on a CUDA tensor would crash when device != "cpu".
    features = z.detach().cpu().numpy()
    kmeans = KMeans(n_clusters=kwarg['cluster'], random_state=0).fit(features)
    preds = kmeans.predict(features)
    return z, preds


def NodeGCN(data, **kwarg):
    """Embed the dependence graph with the configured GNN and cluster nodes.

    Dispatches on ``kwarg['model_name']`` ("gcn", "gat" or "graphsage"),
    trains the chosen model on *data*, then K-Means-clusters the resulting
    node embeddings.

    Args:
        data: graph with ``x``, ``edge_index``, ``edge_attr`` and
            ``num_features`` (PyG ``Data``-like).
        **kwarg: configuration; must include ``model_name``, ``device``,
            ``train_epoch`` and ``cluster``; ``lr`` for "gcn" and ``layer``
            for "graphsage".

    Returns:
        tuple: (embeddings tensor, cluster label per node).

    Raises:
        ValueError: on an unknown ``model_name`` or unsupported ``layer``.
    """
    model_name = kwarg['model_name']
    if model_name == "gcn":
        model = Gcnconv1(data=data, num_feature=data.num_features,
                         **kwarg).to(kwarg['device'])
        return _fit_and_cluster(model, data, kwarg, kwarg['lr'])

    if model_name == "gat":
        # Gatconv1 expects these extra keys; None values mirror its defaults.
        kwarg['num_features'] = data.num_features
        kwarg['num_nodes'] = None
        kwarg['sparse'] = None
        model = Gatconv1(edge_index=data.edge_index, **kwarg).to(kwarg['device'])
        return _fit_and_cluster(model, data, kwarg, 0.01)

    if model_name == "graphsage":
        # Random-walk sampling hyper-parameters for the GraphSAGE loaders.
        kwarg['num_features'] = data.num_features
        kwarg['walk_length'] = 10
        kwarg['context_size'] = 10
        kwarg['walks_per_node'] = 10
        kwarg['num_negative_samples'] = 1
        kwarg['num_nodes'] = None
        kwarg['p'] = 1
        kwarg['q'] = 1
        kwarg['sparse'] = None
        if kwarg['layer'] == 1:
            model = GraphSageconv1(edge_index=data.edge_index,
                                   **kwarg).to(kwarg['device'])
        elif kwarg['layer'] == 2:
            model = GraphSageconv2(edge_index=data.edge_index,
                                   **kwarg).to(kwarg['device'])
        else:
            # Previously fell through to an UnboundLocalError on `model`.
            raise ValueError(f"unsupported layer count: {kwarg['layer']!r}")
        return _fit_and_cluster(model, data, kwarg, 0.01)

    # Previously returned None silently, breaking the caller's tuple unpack.
    raise ValueError(f"unknown model_name: {model_name!r}")


if __name__ == '__main__':
    # Read the experiment configuration from the .ini file given with -c.
    parser = argparse.ArgumentParser(description="ReadConfig")
    parser.add_argument(
        "-c", "--config", type=str,
        default="/Users/wzx/Downloads/module-reverse-by-gnn/config/client/File_symbol_vector_un/include.ini")
    args = parser.parse_args()
    kwarg = ConfigFile(args.config).ReadConfig()

    # Drop any cached "processed" dataset so the graph is rebuilt from scratch.
    processed_dir = os.path.join(kwarg["root"], "processed")
    if os.path.exists(processed_dir):
        shutil.rmtree(processed_dir)

    data = DependenceGraph(kwarg["root"]).data

    # Sweep cluster counts 1..20; for each, average the metrics over
    # `repeats` independent runs (currently a single run).
    repeats = 1
    res = {}
    for cluster_count in range(1, 21):
        kwarg['cluster'] = cluster_count
        a2a_total = 0
        c2c05_total = 0
        c2c03_total = 0
        c2c01_total = 0
        for _ in range(repeats):
            _, preds = NodeGCN(data, **kwarg)
            result2rsf_file(kwarg["root"], preds, kwarg["outfile_path"])
            m = Metric(kwarg["project"], kwarg["outfile_path"], kwarg["ground_path"])
            a2a_total += m[1]
            c2c05_total += m[2]
            c2c03_total += m[3]
            c2c01_total += m[4]
        res[cluster_count] = [
            a2a_total / repeats,
            c2c05_total / repeats,
            c2c03_total / repeats,
            c2c01_total / repeats,
        ]
    print(res)
