from __future__ import print_function, unicode_literals
import copy
import collections
import ctypes
import subprocess
import tempfile
import shutil
import os
import platform
import sys
import _ctypes

from .compilesim import CompiledSimulation

from .core import working_block, Block, LogicNet, set_working_block
from .wire import Input, Output, Const, WireVector, Register
from .memory import MemBlock, RomBlock
from .pyrtlexceptions import PyrtlError, PyrtlInternalError
from .simulation import SimulationTrace, _trace_sort_key

import dgl
import math
import torch
from .uilt import macro, Tarjan, merge_LogicNet, MFFC
from .essent import ESSENT, CondPart, AcyclicPart, distinct, convertIntoCPStmts, StatementGraph,Graph

try:
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping

__all__ = ['GPUSim']


def get_key(mapping, value):
    """Return the list of keys in *mapping* whose value equals *value*.

    Fix: the original parameter was named ``dict``, shadowing the builtin.
    """
    return [k for k, v in mapping.items() if v == value]

def wv_is_args(net, w):
    """Return True when a wire with *w*'s name appears among *net*'s args."""
    return any(arg.name == w.name for arg in net.args)


def wv_is_dests(net, w):
    """Return True when a wire with *w*'s name appears among *net*'s dests."""
    return any(dest.name == w.name for dest in net.dests)

class GPUSim(CompiledSimulation, ESSENT):
    def __init__(self, filename, tnum, aps, tracer=True, block=None,
                 register_value_map=None, memory_value_map=None,
                 default_value=0, maxloopsize=20000, optimization=3):
        """Prepare a GPU simulation of the working block.

        Builds a dgl dependency graph over the block's logic nets, collapses
        combinational cycles into loop super-nodes (Tarjan SCCs), coarsens
        the remainder into MFFC partitions, layers the rebuilt graph
        topologically, and finally emits the CUDA code via ``create_code``.

        Args:
            filename: base name for the generated CUDA source files.
            tnum: threads per block used by the generated kernels.
            aps: target MFFC partition size for coarsening.
            tracer: True for a fresh SimulationTrace, or an existing tracer.
            block: block to simulate (defaults to the working block).
            register_value_map: optional {Register: initial value} map.
            memory_value_map: optional initial memory contents.
            default_value: used for registers without a reset value.
            maxloopsize: loops with more nets than this run on the CPU.
            optimization: optimization level recorded for code generation.
        """
        # Fix: the original declared register_value_map={} / memory_value_map={}
        # as mutable default arguments, shared across calls and instances.
        if register_value_map is None:
            register_value_map = {}
        if memory_value_map is None:
            memory_value_map = {}

        self._dll = self._dir = None
        self.filename = filename
        self.optimization = optimization
        self.stepcount = 131072
        self.W = 16384
        self.tnum = tnum
        self.maxloopsize = maxloopsize
        self.block = working_block(block)
        self.block.sanity_check()

        if tracer is True:
            tracer = SimulationTrace()
        self.tracer = tracer
        self._remove_untraceable()
        self._uid_counter = 0
        self.default_value = default_value
        self._regmap = {}  # filled below from register_value_map / reset values
        self._memmap = memory_value_map
        print('reg number:'+str(len(self.block.wirevector_subset(Register))))
        for r in self.block.wirevector_subset(Register):
            rval = register_value_map.get(r, r.reset_value)
            if rval is None:
                rval = self.default_value
            self._regmap[r] = rval

        self.tracer._set_initial_values(default_value, self._regmap, self._memmap)

        # C-side variable name for every wire: [name, counter, current name].
        self.gpu_varname = {}
        for w in self.block.wirevector_set:
            vn = CompiledSimulation._clean_name(self, 'w', w)
            self.gpu_varname[w] = [vn, 0, vn]

        self.varname = {}
        self.merge_inter_wv = []
        self.loops_block = {}
        self.loops_input = set()  # inputs of every CPU loop
        self.w_conn_loop = {}  # {wire name: set of loops it feeds}
        self.loops_output = set()  # outputs of every loop
        self.macro_in_layer = set()
        self.macro_nodes = []
        self.macro_loop_index = []  # indices of layers' loop super-nodes
        self.loop_indexTonet = {}  # index -> macro net for every loop
        self.loop_blocks_outputs = {}
        self.macro_out_reg = []  # registers that only act as outputs
        self.loops_inter_reg = set()  # outputs internal to loops
        self.loops_reg = {}  # registers of every loop
        self.netToindex, self.indexTonet = self._setIndex(self.block)
        # init_cycleroot holds init ids of candidate cycle roots
        src, dest, init_cycleroot = self._logicnet_conn(self.block, self.netToindex)
        init_graph = dgl.graph((torch.tensor(src), torch.tensor(dest)))
        self.num_node = init_graph.num_nodes()
        init_graph.ndata['id'] = torch.tensor([i for i in range(self.num_node)], dtype=torch.int64)  # id == init id
        # nodetype: 0 = logic net, 1 = loop super-node, 2 = MFFC partition
        init_graph.ndata['nodetype'] = torch.zeros(self.num_node, dtype=torch.int8)

        # Merging of nodes joined by 'w' ops is intentionally disabled:
        # init_graph = self._merge_w(init_graph)

        if init_cycleroot:
            pre_id = init_graph.ndata['id'].tolist()  # entries are init ids
            self.cyclenode = []
            flag = {}
            for init_root in init_cycleroot:
                root = pre_id.index(init_root)  # graph node id for this init id
                for cyc in self.cyclenode:  # init ids
                    if init_root in cyc and isinstance(cyc, list):
                        flag[init_root] = True  # this root's cycle is already known
                        break
                if flag.get(init_root, False):  # skip roots in an already-found cycle
                    continue
                else:
                    tar = Tarjan(root, init_graph)
                    if tar.scc_list:  # found strongly connected components
                        cyclenode = tar.scc_list  # graph node ids
                        for node in cyclenode:
                            node_list_init = []  # translate back to init ids
                            for n in node:
                                node_list_init.append(pre_id[n])
                            node_list_init.sort()
                            if node_list_init not in self.cyclenode:
                                self.cyclenode.append(node_list_init)
                    else:  # no cycle at this root; keep scanning
                        continue
            if self.cyclenode:  # holds original (init) ids
                loop_package_g = self._re_conn(self.cyclenode, 1, init_graph)
            else:
                loop_package_g = init_graph
        else:
            loop_package_g = init_graph

        for w in self.loops_input:
            self.w_conn_loop[w] = set()

        self.loop_nodes = [index for index, id in enumerate(loop_package_g.ndata['nodetype'].tolist()) if id == 1]
        self.macro_loop_index = [loop_package_g.ndata['id'].tolist()[node] for node in self.loop_nodes]

        mffc = self._merge_mffc(loop_package_g, aps)
        mffcs_nodeslist = []  # init ids of the nets inside each MFFC to wrap
        logicnet_index = loop_package_g.ndata['id'].tolist()
        for item in mffc.items():
            if len(item[1]) > 1:
                mffc_nodeslist = []
                for graphid in item[1]:
                    mffc_nodeslist.append(logicnet_index[graphid])
                mffcs_nodeslist.append(mffc_nodeslist)
        self.g = self._re_conn(mffcs_nodeslist, 2, loop_package_g)

        # loop_locate: {layer number: [loop indices too big for the GPU]}
        self.re_layer, self.loop_locate = self._re_layers()
        self.blocknum_list = []
        self.itnum_list = []
        for layer in self.re_layer:
            l = len(layer)
            if l >= 64:
                blocknum = 1
            else:
                blocknum = 1
                # blocknum = math.pow(2, math.floor(math.log2(64 // l)))
            self.blocknum_list.append(int(blocknum))
            itnum = (self.W // self.tnum) // int(blocknum)
            self.itnum_list.append(int(itnum))
        for layer in self.macro_in_layer:
            # layers holding loop super-nodes always use a single block
            self.blocknum_list[layer] = 1
            self.itnum_list[layer] = self.W // self.tnum

        self.layercount = len(self.re_layer)
        print('layer number:'+str(self.layercount))

        self.layer_long_keep_wire = self._lkpin_w()
        # per-layer output totals, excluding cross-layer output nodes
        self.mid_declare, self.mid_outpin = self._midout_pin()
        re_layer = self.re_layer
        # Drop CPU-relegated loops from their layers before sequencing.
        # NOTE(review): removes from the list being iterated; preserved as-is.
        for item in self.loop_locate.items():
            for index in item[1]:
                for n in re_layer[item[0]]:
                    if index == n[0]:
                        re_layer[item[0]].remove(n)
        self.netSequence = self._idtonet(re_layer)

        self.loops_inter_wv = {}
        for loop in self.macro_loop_index:
            net = self.indexTonet[loop]
            inter_wv = self.make_macro_block(loop, net)
            self.loops_inter_wv[loop] = inter_wv

        self.create_code(filename)


    def make_input(self, provide_inputs):
        """Emit the ``<filename>_inputs.cuh`` header holding input waveforms.

        ``provide_inputs`` maps each Input wire's name to its per-step
        values, which are written as a static uint64_t array initializer.
        Also records the working directory in ``self._dir``.
        """
        cwd = os.getcwd().replace('\\', '/') + '/'
        self._dir = cwd
        header_path = cwd + self.filename + '_inputs.cuh'
        header_lines = [
            '#include <stdint.h>\n',
            '#include <stdlib.h>\n',
            '#include <string.h>\n',
            '#include <math.h>\n',
            '#include <cuda_runtime.h>\n',
            '#include <stdio.h>\n',
            '#include <sys/time.h>\n',
            '#define stepcount {stepcount}\n'.format(stepcount=self.stepcount),
        ]
        with open(header_path, 'w') as header:
            header.writelines(header_lines)
            for wire in self.block.wirevector_subset(Input):
                initializer = '{' + ','.join(str(v) for v in provide_inputs[wire.name]) + '}'
                header.write('static uint64_t {vn}[{limb}][stepcount]={val};\n'.format(
                    vn=self.gpu_varname[wire][0],
                    limb=CompiledSimulation._limbs(self, wire),
                    val=initializer))

    def _setIndex(self, block):
        """Assign a dense integer index to every logic net in *block*.

        Uses ``enumerate`` instead of the original hand-maintained counter.
        Records the total net count in ``self.num_logicnet`` and returns
        the (net -> index, index -> net) mappings.
        """
        logicnetToindex = {}
        indexTologicnet = {}
        for index_id, logic in enumerate(block):
            logicnetToindex[logic] = index_id
            indexTologicnet[index_id] = logic
        # keys of indexTologicnet are 0..n-1, so its length is the count
        self.num_logicnet = len(indexTologicnet)
        return logicnetToindex, indexTologicnet

    # Determine the connection relationships between logic nets.
    def _logicnet_conn(self, block, index_logicnet):
        """Build edge lists between logic-net indices, plus cycle roots.

        ``index_logicnet`` maps each LogicNet to its integer index.  An edge
        i -> j is added when a dest wire of net i matches (by name) an arg
        of net j; for memory writes ('@') the edge goes to reads ('m') of
        the same memory (matched via ``op_param[0]``).  An index that feeds
        a lower-indexed net breaks the forward numbering and is collected
        in ``cycleroot`` as a candidate cycle root.

        Returns (src_list, dest_list, cycleroot).
        """
        src_list = []
        dest_list = []
        cycleroot = []
        for item in index_logicnet.items():
            # item[0] is the LogicNet, item[1] its integer index
            if item[0].op == 'r':
                # register reads may feed any net, including earlier-indexed ones
                foundspace = list(index_logicnet.keys())
            elif item[0].op == '@':
                # memory writes connect only to reads of the same memory
                foundspace = self.block.logic_subset('m')
            else:
                # combinational nets only feed later-indexed nets
                foundspace = list(index_logicnet.keys())[item[1] + 1:]
            for net in foundspace:
                if item[0].op == '@':
                    if item[0].op_param[0] == net.op_param[0]:
                        if item[1] > index_logicnet[net]:
                            cycleroot.append(item[1])
                        src_list.append(item[1])
                        dest_list.append(index_logicnet[net])
                else:
                    conn_flag = False
                    for item_dest in item[0].dests:
                        for net_arg in net.args:
                            if item_dest.name == net_arg.name:
                                if item[0].op == 'r' and item[1] > index_logicnet[net]:
                                    cycleroot.append(item[1])
                                src_list.append(item[1])
                                dest_list.append(index_logicnet[net])
                                conn_flag = True
                                break
                        if conn_flag:
                            # one matching wire is enough: a single edge per net pair
                            break

        return src_list, dest_list, cycleroot

    def _layers(self, layerlist, cyclenode):
        """Strip cycle nodes out of each topological layer.

        Returns (layers with cycle nodes removed, set of layer indices
        that contained at least one cycle node).  When ``cyclenode`` is
        empty, both results are empty — mirroring the original guard.
        """
        stripped_layers = []
        cycle_layer_ids = set()
        if cyclenode:
            for layer_idx, layer in enumerate(layerlist):
                kept = [node for node in layer if node not in cyclenode]
                if len(kept) != len(layer):
                    # at least one node of this layer belonged to a cycle
                    cycle_layer_ids.add(layer_idx)
                stripped_layers.append(kept)
        return stripped_layers, cycle_layer_ids

    def _merge_mffc(self, loop_package_g, partition_size=20):
        """Partition the loop-packaged graph into MFFC-based groups.

        Converts the dgl graph into an ESSENT-style adjacency ``Graph``,
        runs acyclic MFFC coarsening (loop super-nodes are excluded from
        merging), and returns the merge-id -> member-nodes mapping.
        """
        total_nodes = loop_package_g.num_nodes()
        essent_graph = Graph()
        essent_graph.inNeigh = [loop_package_g.predecessors(n).tolist()
                                for n in range(total_nodes)]
        essent_graph.outNeigh = [loop_package_g.successors(n).tolist()
                                 for n in range(total_nodes)]
        partitioner = AcyclicPart(essent_graph, self.loop_nodes)
        partitioner.coarsenWithMFFCs(partition_size)
        partitioner.mergeSingleInputPartsIntoParents(partition_size)
        # Further small-part merging is intentionally left disabled:
        # partitioner.mergeSmallParts(smallPartCutoff=partition_size)
        # partitioner.mergeSmallPartsDown(smallPartCutoff=partition_size)
        return partitioner.mg.mergeIDToMembers

    def _merge_w(self, graph):
        """Fold 'w' (wire passthrough) nets out of the dependency graph.

        For each 'w' net: if it has successors, the successors' args are
        rewritten to the w-net's source wire and the w node is removed;
        if it has a single predecessor and no successors (and the
        predecessor is not a register with other fanout), the predecessor's
        dest is rewritten to the w-net's dest.  ``graph``, ``self.block``
        and ``self.indexTonet`` are all updated in place; the graph is
        returned.

        NOTE(review): currently unused — the call site in __init__ is
        commented out; confirm before re-enabling.
        """
        op_w = self.block.logic_subset('w')  # NOTE(review): computed but never used
        for i in range(1):  # single pass; break below exits early when no 'w' nets remain
            del_w = set()
            del_logic = set()
            l_nl = {}
            for item in self.netToindex.items():
                if item[0].op == 'w':
                    init_index = item[1]
                    logicnet = self.indexTonet[item[1]]
                    pre_id = graph.ndata['id'].tolist()
                    index = pre_id.index(init_index)
                    if True:
                        innode = graph.in_edges(index)[0].tolist()  # inputs of this 'w' node (len <= 1)
                        outnode = graph.out_edges(index)[1].tolist()  # nodes fed by this 'w' node
                        if len(innode) <= 1 and len(outnode) > 0:  # rewrite successors' args to w's input
                            # at least one successor
                            orig_wire = logicnet.dests[0]
                            del_w.add(orig_wire)
                            new_src = logicnet.args[0]
                            for n in outnode:
                                dst = self.indexTonet[pre_id[n]]
                                # clone the successor with the w wire replaced by its source
                                new_net = LogicNet(
                                    op=dst.op, op_param=dst.op_param, dests=dst.dests,
                                    args=tuple(new_src if w is orig_wire else w for w in dst.args))
                                if len(innode) > 0:
                                    # connect w's predecessor directly to the successor
                                    graph.add_edges(innode[0], n)

                                self.block.add_net(new_net)
                                self.indexTonet[pre_id[n]] = new_net
                                del_logic.add(dst)
                            graph.remove_nodes(index)
                            del self.indexTonet[init_index]
                            del_logic.add(logicnet)

                        elif len(innode) == 1 and len(outnode) == 0:  # rewrite predecessor's dest to w's output
                            # no successors
                            if len(graph.out_edges(innode[0])[1].tolist()) == 1:
                                src = self.indexTonet[pre_id[innode[0]]]
                                if src.op != 'r':
                                    orig_wire = logicnet.args[0]
                                    del_w.add(orig_wire)
                                    new_dst = logicnet.dests[0]
                                    # clone the predecessor with its dest renamed to w's dest
                                    new_net = LogicNet(
                                        op=src.op, op_param=src.op_param, args=src.args,
                                        dests=tuple(new_dst if w is orig_wire else w for w in src.dests))
                                    graph.remove_nodes(index)
                                    self.block.add_net(new_net)
                                    del self.indexTonet[init_index]
                                    self.indexTonet[pre_id[innode[0]]] = new_net
                                    del_logic.add(logicnet)
                                    del_logic.add(src)



                        else:
                            # fan-in/fan-out shape not handled; leave this net alone
                            continue

            # apply deferred deletions after the scan
            for w in del_w:
                self.block.remove_wirevector(w)
            for net in del_logic:
                self.block.logic.remove(net)

            if not self.block.logic_subset('w'):
                break

        return graph

    def _re_conn(self, nodeslist, nodetype, graph):
        """Collapse each group of nodes in ``nodeslist`` into one super-node.

        ``nodeslist`` holds groups of init ids — combinational cycles when
        ``nodetype`` == 1, MFFC partitions when ``nodetype`` == 2.  For
        each group a new graph node is added, external in/out edges are
        re-attached to it, a ``macro`` pseudo-net describing its external
        args/dests is registered in ``self.indexTonet`` (and, for loops,
        in ``self.loop_indexTonet`` / ``self.loops_input`` /
        ``self.loops_output``), then the member nodes are removed.
        Returns the rewritten graph.
        """

        for j, item in enumerate(nodeslist):  # item holds init ids
            pre_id = graph.ndata['id'].tolist()
            cycnodes = []  # the group's node ids in the current graph
            for init_id in item:
                cycnodes.append(pre_id.index(init_id))
            add_index = self.num_logicnet  # the group becomes one super-node
            add_node = []
            add_node.append(add_index)
            add_nid = graph.num_nodes()
            graph.add_nodes(1, data={'id': torch.tensor(add_node, dtype=torch.int64),
                                     'nodetype': torch.tensor([nodetype], dtype=torch.int8)})
            in_edges = graph.in_edges(torch.tensor(cycnodes))
            source = in_edges[0].tolist()  # predecessors of every node in the group
            source_dest = in_edges[1].tolist()

            s_add_u = []
            macro_args = set()
            s_add_v = []

            for index, node in enumerate(source):
                if node not in cycnodes:  # predecessor lies outside the group
                    s_add_u.append(node)
                    s_add_v.append(add_nid)
                    # wires crossing into the group become macro args
                    macro_args.update(
                        set(self.indexTonet[pre_id[node]].dests) & set(
                            self.indexTonet[pre_id[source_dest[index]]].args))
                else:
                    continue

            if len(s_add_u):
                graph.add_edges(s_add_u, s_add_v)

            d_add_u = []
            d_add_v = []
            macro_dests = set()

            macro_block_replace={}

            for cnode in cycnodes:
                replace_nets=[]
                out_flag = 0
                in_flag = 0
                out_edges = graph.out_edges(cnode)
                for i, out_node in enumerate(out_edges[1].tolist()):
                    if out_node not in cycnodes:
                        out_flag=1
                        # successor lies outside the group
                        if out_node not in d_add_v:
                            d_add_v.append(out_node)
                            d_add_u.append(add_nid)
                        edge_source = self.indexTonet[pre_id[out_edges[0].tolist()[i]]]
                        # wires crossing out of the group become macro dests
                        macro_dests.update(
                            set(edge_source.dests) & set(self.indexTonet[pre_id[out_node]].args))
                    else:
                        in_flag=1
                        replace_nets.append(pre_id[out_node])
                r_net = pre_id[cnode]
                if in_flag and out_flag:
                    # member feeds both inside and outside: record internal consumers
                    macro_block_replace[r_net] = replace_nets
                if in_flag==0 and out_flag==1 and isinstance(self.indexTonet[r_net].dests[0],Register):
                    # register that only drives the outside world
                    self.macro_out_reg.append(self.indexTonet[r_net].dests[0].name)

            self.loop_blocks_outputs[add_index]=macro_block_replace



            if len(d_add_v):
                graph.add_edges(d_add_u, d_add_v)

            for node in item:
                net = self.indexTonet[node]
                for arg in net.args:
                    # top-level Inputs/Consts used inside the group are also macro args
                    if isinstance(arg, Input) | isinstance(arg, Const):
                        macro_args.add(arg)
                    else:
                        continue
                for dest in net.dests:
                    # Outputs produced inside the group are also macro dests
                    if isinstance(dest, Output):
                        macro_dests.add(dest)
                    else:
                        continue

            m_net = macro(logicnet=nodeslist[j], args=list(macro_args), dests=list(macro_dests))

            self.indexTonet[add_index] = m_net
            if nodetype == 1:
                for arg in macro_args:
                    if not isinstance(arg,Const):
                        self.loops_input.add(arg.name)
                for dest in macro_dests:
                    self.loops_output.add(dest.name)
                self.loop_indexTonet[add_index] = m_net
            self.num_logicnet += 1


            # drop the member nodes now that the super-node replaces them
            graph.remove_nodes(torch.tensor(cycnodes))
        return graph

    def _re_layers(self):
        """Topologically layer the rebuilt graph.

        Returns (layers, loop_locate): each layer entry is
        [original logic-net index, current graph node id]; loop_locate
        maps layer number -> indices of loop super-nodes too large
        (> maxloopsize nets) to run on the GPU.  Side effects: caches the
        per-layer graph ids in ``self.new_layer_id`` and records layers
        containing loop super-nodes in ``self.macro_in_layer``.
        """
        original_ids = self.g.ndata['id'].tolist()
        node_types = self.g.ndata['nodetype'].tolist()
        layers = []
        loop_locate = {}
        self.new_layer_id = []
        for layer_no, layer_tensor in enumerate(dgl.topological_nodes_generator(self.g)):
            graph_ids = layer_tensor.tolist()
            self.new_layer_id.append(graph_ids)
            layer_entries = []
            oversized = []
            for gid in graph_ids:
                logic_index = original_ids[gid]
                # loop super-nodes whose body exceeds maxloopsize go to the CPU
                if node_types[gid] == 1 and \
                        len(self.indexTonet[logic_index].logicnet) > self.maxloopsize:
                    oversized.append(logic_index)
                layer_entries.append([logic_index, gid])
                if logic_index in self.macro_loop_index:
                    self.macro_in_layer.add(layer_no)
            if oversized:
                loop_locate[layer_no] = oversized
            layers.append(layer_entries)
        return layers, loop_locate

    def loop_in_gpu(self, loop_index):
        """Return True when loop super-node ``loop_index`` runs on the GPU,
        False when it was relegated to the CPU (listed in ``loop_locate``).

        Raises:
            PyrtlError: if ``loop_index`` is not a known loop index.
        """
        if loop_index not in self.macro_loop_index:
            # Bug fix: the original constructed the PyrtlError without
            # raising it, silently returning True for unknown indices.
            raise PyrtlError('Error:not a loop index')
        for located in self.loop_locate.values():
            if loop_index in located:
                return False
        return True


    def layer_merge(self):
        """Merge runs of consecutive single-node layers into merge_LogicNets.

        Scans ``self.re_layer``: runs of layers that each contain exactly
        one non-macro node are packaged into a ``merge_LogicNet`` whose
        args are the run's external inputs and whose dests are its
        external outputs.  Returns {merge net index: [member entries]}.

        NOTE(review): two suspicious points to confirm against callers —
        (1) ``self.indexTonet[layer[0]]`` assumes each layer element is a
        plain index, but ``_re_layers`` produces [index, node] pairs;
        (2) the end-of-list branch computes the index as
        ``self.num_logicnet + i - 1`` without bumping ``num_logicnet``,
        unlike the main branch, so collisions look possible.
        """
        merge_layer = []
        merge_layer_list = {}
        merge_layer_index = []
        layer_index = []
        i = 0  # count of merge nets created so far
        for index, layer in enumerate(self.re_layer):
            if (len(layer) == 1) and (not isinstance(self.indexTonet[layer[0]], macro)):
                # extend the current run of single-node layers
                layer_index.append(index)
                merge_layer.append(layer[0])
            else:
                if len(merge_layer) > 1:
                    i = i + 1

                    # external interface of the merged run: args minus
                    # internally-produced wires, dests minus internally-consumed
                    args_all = set()
                    dest_all = set()
                    for node in merge_layer:
                        args_all.update(set(self.indexTonet[node].args))
                        dest_all.update(set(self.indexTonet[node].dests))
                    merge_args = args_all - dest_all
                    merge_dests = dest_all - args_all

                    merge_logicnet_index = self.num_logicnet
                    merge_logicnet = merge_LogicNet(logicnet=merge_layer, args=tuple(merge_args),
                                                    dests=tuple(merge_dests))
                    self.indexTonet[merge_logicnet_index] = merge_logicnet
                    self.num_logicnet += 1
                    merge_layer_list[merge_logicnet_index] = copy.deepcopy(merge_layer)
                    merge_layer.clear()
                    merge_layer_index.append(copy.deepcopy(layer_index))
                    layer_index.clear()
                else:
                    # a run of length 1 is not worth merging
                    merge_layer.clear()
            if index == len(self.re_layer) - 1:
                # flush a run that reaches the end of the layer list
                if len(merge_layer):
                    i = i + 1
                    args_all = set()
                    dest_all = set()
                    for node in merge_layer:
                        args_all.update(set(self.indexTonet[node].args))
                        dest_all.update(set(self.indexTonet[node].dests))
                    merge_args = args_all - dest_all
                    merge_dests = dest_all - args_all
                    merge_logicnet_index = self.num_logicnet + i - 1
                    merge_logicnet = merge_LogicNet(logicnet=copy.deepcopy(merge_layer), args=tuple(merge_args),
                                                    dests=(tuple(merge_dests)))
                    self.indexTonet[merge_logicnet_index] = merge_logicnet
                    merge_layer_list[merge_logicnet_index] = copy.deepcopy(merge_layer)
                    merge_layer.clear()
                    merge_layer_index.append(copy.deepcopy(layer_index))
                    layer_index.clear()
        return merge_layer_list

    def _merge_layer_graph(self, merge_layer_list):
        """Apply layer merges to ``self.g``: each merged group becomes one node.

        For every {merge index: member init ids} entry, a new node carrying
        the merge index is added, edges from/to nodes outside the group are
        re-attached to it, and the member nodes are removed.
        """
        for item in merge_layer_list.items():
            pre_id = self.g.ndata['id'].tolist()
            cycnodes = []  # the group's node ids in the current graph
            for init_id in item[1]:
                cycnodes.append(pre_id.index(init_id))
            add_index = item[0]
            add_node = []
            add_node.append(add_index)
            add_nid = self.g.num_nodes()
            self.g.add_nodes(1, data={'id': torch.tensor(add_node)})
            in_edges = self.g.in_edges(torch.tensor(cycnodes))
            source = in_edges[0].tolist()

            s_add_u = []
            s_add_v = []
            deal_node = []  # predecessors already re-attached (dedup)
            for index, node in enumerate(source):
                if node in deal_node:
                    continue
                else:
                    if node not in cycnodes:  # predecessor lies outside the group
                        deal_node.append(node)
                        s_add_u.append(node)
                        s_add_v.append(add_nid)
                    else:
                        continue

            if len(s_add_u):
                self.g.add_edges(s_add_u, s_add_v)

            out_edges = self.g.out_edges(torch.tensor(cycnodes))  # outgoing edges of every member node
            d_add_u = []
            d_add_v = []
            for i, node in enumerate(out_edges[1].tolist()):
                if node not in cycnodes:
                    # successor lies outside the group
                    d_add_v.append(node)
                    d_add_u.append(add_nid)

                else:
                    continue
            if len(d_add_v):
                self.g.add_edges(d_add_u, d_add_v)

            # drop the member nodes now that the super-node replaces them
            self.g.remove_nodes(torch.tensor(cycnodes))

    def _lkpin_w(self):
        """Collect names of wires that must be kept alive across layers.

        Every dest wire produced in any layer except the last is treated
        as a potential cross-layer value; Registers and Outputs are
        excluded since they already have dedicated storage.
        """
        original_ids = self.g.ndata['id'].tolist()
        long_keep = set()
        for layer in self.new_layer_id[:self.layercount - 1]:
            for node in layer:
                for wire in self.indexTonet[original_ids[node]].dests:
                    if not isinstance(wire, (Register, Output)):
                        long_keep.add(wire.name)
        return long_keep

    def _idtonet(self, re_layer):
        """Translate layers of [logic index, graph id] pairs into layers of
        [LogicNet, graph id] pairs, padded out to ``self.layercount`` layers."""
        net_sequence = [[] for _ in range(self.layercount)]
        for layer_no, layer in enumerate(re_layer):
            for logic_index, graph_id in layer:
                net_sequence[layer_no].append([self.indexTonet[logic_index], graph_id])
        return net_sequence

    def _midout_pin(self):
        """Compute per-layer intermediate-output slot requirements.

        Returns (mid_out, outpin): ``mid_out`` maps each C integer type
        name to the maximum slot count of that type needed by any single
        layer; ``outpin`` maps each rebuilt-graph node id to a list of
        [slot offset (-1 for long-keep/Output/Register wires), type name,
        wire name, limb count] entries, one per dest wire it drives.
        """
        mid_out = {'uint8_t': 0, 'uint16_t': 0, 'uint32_t': 0, 'uint64_t': 0}
        outpin = {}  # per rebuilt-graph node: its output slot descriptors
        netSequence = self._idtonet(self.re_layer)
        for i, layer in enumerate(netSequence):
            # per-layer slot counters, one per C type
            uint8 = 0
            uint16 = 0
            uint32 = 0
            uint64 = 0
            for j, n in enumerate(layer):
                item = []
                net = n[0]
                for dest in net.dests:
                    if (dest.name in self.layer_long_keep_wire) | isinstance(dest, Output) | isinstance(dest, Register):
                        # long-keep / architectural wires get dedicated storage
                        val_type = self._val_type(dest)
                        item.append([-1, val_type, dest.name, CompiledSimulation._limbs(self, dest)])
                    else:
                        bitwidth = dest.bitwidth
                        # Bug fix: the original tested "0 < bitwidth < 8", so an
                        # exactly-8-bit wire fell through every branch into the
                        # >64-bit limb path; 8-bit wires now land in uint8_t.
                        if 0 < bitwidth <= 8:
                            uint8 += 1
                            if uint8 > mid_out['uint8_t']:
                                mid_out['uint8_t'] = uint8
                            item.append([uint8 - 1, 'uint8_t', dest.name, 1])
                        elif 8 < bitwidth <= 16:
                            uint16 += 1
                            if uint16 > mid_out['uint16_t']:
                                mid_out['uint16_t'] = uint16
                            item.append([uint16 - 1, 'uint16_t', dest.name, 1])
                        elif 16 < bitwidth <= 32:
                            uint32 += 1
                            if uint32 > mid_out['uint32_t']:
                                mid_out['uint32_t'] = uint32
                            item.append([uint32 - 1, 'uint32_t', dest.name, 1])
                        elif 32 < bitwidth <= 64:
                            uint64 += 1
                            if uint64 > mid_out['uint64_t']:
                                mid_out['uint64_t'] = uint64
                            item.append([uint64 - 1, 'uint64_t', dest.name, 1])
                        else:  # bitwidth > 64: spread across multiple 64-bit limbs
                            limbs = CompiledSimulation._limbs(self, dest)
                            uint64 += limbs
                            if uint64 > mid_out['uint64_t']:
                                mid_out['uint64_t'] = uint64
                            item.append([uint64 - limbs, 'uint64_t', dest.name, CompiledSimulation._limbs(self, dest)])

                outpin[self.new_layer_id[i][j]] = item

        return mid_out, outpin

    def _val_type(self, w):
        val_type = 'uint64_t'
        bitwidth = w.bitwidth
        if 0 < bitwidth < 8:
            val_type = 'uint8_t'
        elif 8 < bitwidth <= 16:
            val_type = 'uint16_t'
        elif 16 < bitwidth <= 32:
            val_type = 'uint32_t'
        elif 32 < bitwidth <= 64:
            val_type = 'uint64_t'
        return val_type

    def _gpu_makeini(self, w, v):
        """ C initializer string for a wire with a given value. """
        pieces = []
        for _ in range(self._limbs(w)):
            pieces.append('{' + str(hex(v & ((1 << 64) - 1))) + '}')
            v >>= 64
        return ','.join(pieces).join('{}')

    def _gpu_getarglimb(self, arg, n, it='it'):
        """Return the CUDA expression that reads limb *n* of source wire *arg*.

        Args:
            arg: source WireVector of the net being emitted.
            n: zero-based 64-bit limb index.
            it: name of the iteration variable used in the generated kernel.

        The expression shape depends on where the wire is stored: inputs and
        loop outputs live in flat ``[limb*W + it*tnum + tid]`` buffers,
        registers and cross-layer wires in 2-D per-limb arrays, constants in
        plain arrays, and everything else in per-limb scratch variables
        selected via the ``flag`` offset from ``self.gpu_varname``.  Limbs
        beyond the wire's width read as the literal ``'0'``.
        """
        if arg.name not in self.merge_inter_wv:
            if isinstance(arg, Input) or arg.name in self.loops_output:
                if isinstance(arg,Input):
                    return '{vn}[{n}*W+{it}*tnum+tid]'.format(vn=self.gpu_varname[arg][0], n=n, it=it)
                else:
                    # Wire fed back from a partitioned loop: 'loop_' buffer.
                    return 'loop_{vn}[{n}*W+{it}*tnum+tid]'.format(vn=self.gpu_varname[arg][0], n=n, it=it)
            elif isinstance(arg, Register):
                return '{vn}[{n}][{it}*tnum+tid]'.format(vn=self.gpu_varname[arg][0], it=it,
                                                         n=n) if arg.bitwidth > 64 * n else '0'
            elif arg.name in self.layer_long_keep_wire :
                # Cross-layer wire: stored like a register so later layers read it.
                return '{vn}[{n}][{it}*tnum+tid]'.format(vn=self.gpu_varname[arg][0], it=it,
                                                         n=n) if arg.bitwidth > 64 * n else '0'
            elif isinstance(arg, Const):
                return '{vn}[{n}]'.format(vn=self.gpu_varname[arg][0], n=n) if arg.bitwidth > 64 * n else '0'

            else:
                # Layer-local scratch: gpu_varname[arg][1] is the base slot,
                # +n selects the limb's scratch variable.
                return '{vn}{flag}[{it}*tnum+tid]'.format(vn=self.gpu_varname[arg][0], it=it,
                                                          flag=int(self.gpu_varname[arg][1]) + int(n))
        else:
            # Wire internal to a merged (macro) block: indexed by tid only.
            if isinstance(arg, Register):
                return 'shared_{vn}[{n}][tid]'.format(vn=self.gpu_varname[arg][0], n=n) if arg.bitwidth > 64 * n else '0'
            else:
                return '{vn}[{n}][tid]'.format(vn=self.gpu_varname[arg][2], n=n) if arg.bitwidth > 64 * n else '0'

    def _gpu_getdestlimb(self, dest, n,it='it'):
        """Return the CUDA lvalue that stores limb *n* of destination *dest*.

        Mirrors `_gpu_getarglimb`: outputs and loop inputs use flat
        ``[limb*W + it*tnum + tid]`` buffers, registers and cross-layer wires
        use 2-D per-limb arrays, and layer-local wires use scratch slots.
        A wire feeding several partitioned loops is written to every loop's
        buffer via a chained assignment (``a=b=...``).
        """
        if dest.name not in self.merge_inter_wv:
            if isinstance(dest, Output) or dest.name in self.loops_input:
                if isinstance(dest,Output):
                    return '{vn}[{n}*W+{it}*tnum+tid]'.format(vn=self.gpu_varname[dest][0], n=n, it=it)
                else:
                    # One buffer per loop that consumes this wire; joined with
                    # '=' so a single generated statement updates all of them.
                    destlimb=[]
                    for index in self.w_conn_loop[dest.name]:
                        destlimb.append('loop{index}_{vn}[{n}*W+{it}*tnum+tid]'.format(vn=self.gpu_varname[dest][0], n=n, it=it,index=index))
                    return '='.join(destlimb)

            if (dest.name in self.layer_long_keep_wire) :
                # Cross-layer wire: keep it addressable by later layers.
                return '{vn}[{n}][{it}*tnum+tid]'.format(vn=self.gpu_varname[dest][0], it=it,
                                                         n=n) if dest.bitwidth > 64 * n else '0'
            elif isinstance(dest, Register):
                return '{vn}[{n}][{it}*tnum+tid]'.format(vn=self.gpu_varname[dest][0], it=it,
                                                           n=n) if dest.bitwidth > 64 * n else '0'
            else:
                # Layer-local scratch slot: base offset + limb index.
                return '{vn}{flag}[{it}*tnum+tid]'.format(vn=self.gpu_varname[dest][0], it=it,
                                                          flag=int(self.gpu_varname[dest][1]) + int(n))
        else:
            # Wire internal to a merged (macro) block: indexed by tid only.
            return '{vn}[{n}][tid]'.format(vn=self.gpu_varname[dest][2], n=n) if dest.bitwidth > 64 * n else '0'

    def _gpu_for_getarglimb(self, arg, n):
        """ Get the nth limb of the given wire.
        param arg:输入
        param n:limb

        """
        if arg.name not in self.merge_inter_wv:
            if isinstance(arg, Register):
                return '{vn}[{n}][j]'.format(vn=self.gpu_varname[arg][0],
                                             n=n) if arg.bitwidth > 64 * n else '0'
            elif arg.name in self.layer_long_keep_wire:
                return '{vn}[{n}][j]'.format(vn=self.gpu_varname[arg][0],
                                             n=n) if arg.bitwidth > 64 * n else '0'
            elif isinstance(arg, Const):
                return '{vn}[{n}]'.format(vn=self.gpu_varname[arg][0], n=n) if arg.bitwidth > 64 * n else '0'
            elif isinstance(arg, Input):
                return '{vn}[{n}*W+j]'.format(vn=self.gpu_varname[arg][0], n=n)
            else:
                return '{vn}{flag}[j]'.format(vn=self.gpu_varname[arg][0],
                                              flag=int(self.gpu_varname[arg][1]) + int(n))
        else:

            return '{vn}[{n}]'.format(vn=self.gpu_varname[arg][0], n=n) if arg.bitwidth > 64 * n else '0'

    def _gpu_for_getdestlimb(self, dest, n):
        """ Get the nth limb of the given wire.
        """
        # print(self.merge_inter_wv)
        if dest.name not in self.merge_inter_wv:
            if (dest.name in self.layer_long_keep_wire):
                return '{vn}[{n}][j]'.format(vn=self.gpu_varname[dest][0],
                                             n=n) if dest.bitwidth > 64 * n else '0'
            elif isinstance(dest, Register):
                return '{vn}[{n}][j+1]'.format(vn=self.gpu_varname[dest][0],
                                               n=n) if dest.bitwidth > 64 * n else '0'
            elif isinstance(dest, Output):
                return '{vn}[{n}*W+j]'.format(vn=self.gpu_varname[dest][0], n=n)
            else:
                return '{vn}{flag}[j]'.format(vn=self.gpu_varname[dest][0],
                                              flag=int(self.gpu_varname[dest][1]) + int(n))
        else:
            return '{vn}[{n}]'.format(vn=self.gpu_varname[dest][0], n=n) if dest.bitwidth > 64 * n else '0'

    def _gpu_build_wire(self, write, op, param, args, dest):
        """Emit CUDA statements copying args[0] into dest, limb by limb."""
        for limb in range(CompiledSimulation._limbs(self, dest)):
            sink = self._gpu_getdestlimb(dest, limb)
            src = self._gpu_getarglimb(args[0], limb)
            mask = CompiledSimulation._makemask(self, dest, args[0].bitwidth, limb)
            write('{dest} = {arg}{mask};'.format(dest=sink, arg=src, mask=mask))


    def _gpu_build_memread(self, write, op, param, args, dest ):
        """Emit CUDA statements reading one memory word into *dest*.

        ``param[1]`` is the memory block and ``args[0]`` carries the read
        address.  ROMs are flat __constant__ arrays indexed directly; RAMs
        are device hashmaps queried via ``lookup()`` with a step index.
        """
        mem = param[1]
        for n in range(CompiledSimulation._limbs(self, dest)):
            if isinstance(mem, RomBlock):
                # NOTE(review): the ROM address is indexed with `[tid]` only,
                # unlike the RAM path's `i*W+tid` step index — confirm this
                # matches how ROM address buffers are laid out.
                write('{dest} = *({mem}+{addr}[tid]*{limb}+{n}){mask};'.format(dest=self._gpu_getdestlimb(dest, n ), n=n,
                                                                               limb=CompiledSimulation._limbs(self,
                                                                                                              mem),
                                                                               mem=self.gpu_varname[mem][0],
                                                                               addr=self.gpu_varname[args[0]][0],
                                                                               mask=CompiledSimulation._makemask(self,
                                                                                                                 dest,
                                                                                                                 mem.bitwidth,
                                                                                                                 n)))
            else:
                # RAM: lookup() returns the limb array of the newest write at
                # or before step i*W+tid; the mask trims the top limb.
                write('{dest} = lookup({mem}, {addr}, i*W+tid)[{n}]{mask};'.format(
                    dest=self._gpu_getdestlimb(dest, n ), n=n,
                    mem=self.gpu_varname[mem][0], addr=self._gpu_getarglimb(args[0], 0 ),
                    mask=CompiledSimulation._makemask(self, dest, mem.bitwidth, n)))

    def _gpu_build_not(self, write, op, param, args, dest):
        """Emit CUDA statements computing dest = ~args[0], limb by limb."""
        for limb in range(CompiledSimulation._limbs(self, dest)):
            stmt = '{dest} = (~{arg}){mask};'.format(
                dest=self._gpu_getdestlimb(dest, limb),
                arg=self._gpu_getarglimb(args[0], limb),
                mask=CompiledSimulation._makemask(self, dest, None, limb))
            write(stmt)

    def _gpu_build_bitwise(self, write, op, param, args, dest):
        """Emit dest = args[0] <op> args[1] per limb; *op* is '&', '|' or '^'."""
        width = max(args[0].bitwidth, args[1].bitwidth)
        for limb in range(CompiledSimulation._limbs(self, dest)):
            lhs = self._gpu_getarglimb(args[0], limb)
            rhs = self._gpu_getarglimb(args[1], limb)
            write('{dest} = ({arg0}{op}{arg1}){mask};'.format(
                dest=self._gpu_getdestlimb(dest, limb), arg0=lhs, arg1=rhs, op=op,
                mask=CompiledSimulation._makemask(self, dest, width, limb)))

    def _gpu_build_nand(self, write, op, param, args, dest):
        """Emit dest = ~(args[0] & args[1]) per limb."""
        for limb in range(CompiledSimulation._limbs(self, dest)):
            lhs = self._gpu_getarglimb(args[0], limb)
            rhs = self._gpu_getarglimb(args[1], limb)
            write('{dest} = (~({arg0}&{arg1})){mask};'.format(
                dest=self._gpu_getdestlimb(dest, limb), arg0=lhs, arg1=rhs,
                mask=CompiledSimulation._makemask(self, dest, None, limb)))

    def _gpu_build_eq(self, write, op, param, args, dest):
        """Emit one statement setting dest to 1 iff args[0] == args[1].

        Compares every limb of the (possibly multi-limb) operands and ANDs
        the per-limb equalities together.
        """
        cond = []
        for n in range(max(CompiledSimulation._limbs(self, args[0]), CompiledSimulation._limbs(self, args[1]))):
            arg0 = self._gpu_getarglimb(args[0], n)
            arg1 = self._gpu_getarglimb(args[1], n)
            cond.append('({arg0}=={arg1})'.format(arg0=arg0, arg1=arg1))
        # An '==' result is a single bit, so it always lives in limb 0.
        # (Previously the leftover loop variable `n` was reused here, which
        # selected a nonexistent destination limb for >64-bit operands.)
        write('{dest} = {cond};'.format(dest=self._gpu_getdestlimb(dest, 0), cond='&&'.join(cond)))

    def _gpu_build_cmp(self, write, op, param, args, dest):
        """Emit one statement computing dest = args[0] <op> args[1]
        (*op* is '<' or '>') over multi-limb operands.

        Builds the comparison from low limbs outward: a higher limb decides
        unless equal, in which case the accumulated lower-limb test applies.
        """
        cond = None
        for n in range(max(CompiledSimulation._limbs(self, args[0]), CompiledSimulation._limbs(self, args[1]))):
            arg0 = self._gpu_getarglimb(args[0], n)
            arg1 = self._gpu_getarglimb(args[1], n)
            c = '({arg0}{op}{arg1})'.format(arg0=arg0, op=op, arg1=arg1)
            if cond is None:
                cond = c
            else:
                cond = '({c}||(({arg0}=={arg1})&&{inner}))'.format(
                    c=c, arg0=arg0, arg1=arg1, inner=cond)
        # A comparison result is one bit wide, so write it to limb 0.
        # (Previously the leftover loop variable `n` was reused here, which
        # selected a nonexistent destination limb for >64-bit operands.)
        write('{dest} = {cond};'.format(dest=self._gpu_getdestlimb(dest, 0), cond=cond))

    def _gpu_build_mux(self, write, op, param, args, dest):
        """Emit a 2-way mux: dest = args[2] if args[0] else args[1].

        Both data inputs are first materialized into a local 2-row temporary
        array so the select expression only has to index the row.
        """
        limbs = CompiledSimulation._limbs(self, dest)
        # (`type` shadowed the builtin in the original; renamed locally.)
        elem_type = self._val_type(dest)
        write('{type} tmp_{name}[2][{limbs}];'.format(name=dest.name, type=elem_type,
                                                      limbs=limbs))
        # Row 0 holds the select==0 input, row 1 the select==1 input.
        for row, src in ((0, args[1]), (1, args[2])):
            for limb in range(limbs):
                write('tmp_{name}[{row}][{n}] = {arg}{mask};'.format(
                    name=dest.name, row=row, n=limb,
                    arg=self._gpu_getarglimb(src, limb),
                    mask=CompiledSimulation._makemask(self, dest, src.bitwidth, limb)))
        for limb in range(limbs):
            write('{dest} = tmp_{name}[{mux}][{n}];'.format(
                name=dest.name, n=limb,
                dest=self._gpu_getdestlimb(dest, limb),
                mux=self._gpu_getarglimb(args[0], 0)))

    def _gpu_build_add(self, write, op, param, args, dest):
        """Emit multi-limb addition dest = args[0] + args[1].

        Uses the pre-declared kernel temporaries ``tmp`` and ``carry``; the
        carry is detected via unsigned-overflow wraparound comparisons.
        """
        write('carry=0;')
        width = max(args[0].bitwidth, args[1].bitwidth) + 1
        for limb in range(CompiledSimulation._limbs(self, dest)):
            a = self._gpu_getarglimb(args[0], limb)
            b = self._gpu_getarglimb(args[1], limb)
            out = self._gpu_getdestlimb(dest, limb)
            write('tmp = {arg0}+{arg1};'.format(arg0=a, arg1=b))
            write('{dest} = (tmp + carry){mask};'.format(
                dest=out, mask=CompiledSimulation._makemask(self, dest, width, limb)))
            write('carry = (tmp < {arg0})|({dest} < tmp);'.format(arg0=a, dest=out))

    def _gpu_build_sub(self, write, op, param, args, dest):
        """Emit multi-limb subtraction dest = args[0] - args[1] with borrow.

        Uses the pre-declared kernel temporaries ``tmp`` and ``carry`` (the
        borrow), detected via unsigned-underflow wraparound comparisons.
        """
        write('carry = 0;')
        for limb in range(CompiledSimulation._limbs(self, dest)):
            a = self._gpu_getarglimb(args[0], limb)
            b = self._gpu_getarglimb(args[1], limb)
            out = self._gpu_getdestlimb(dest, limb)
            write('tmp = {arg0}-{arg1};'.format(arg0=a, arg1=b))
            write('{dest} = (tmp - carry){mask};'.format(
                dest=out, mask=CompiledSimulation._makemask(self, dest, None, limb)))
            write('carry = (tmp > {arg0})|({dest} > tmp);'.format(arg0=a, dest=out))

    ##########

    def _gpu_build_mul(self, write, op, param, args, dest ):
        """Emit schoolbook multi-limb multiplication dest = args[0] * args[1].

        Zeroes the destination, then accumulates 64x64->128-bit partial
        products (via the ``gpu_mul128`` device helper) into the appropriate
        destination limbs, propagating carries.  Uses the pre-declared kernel
        temporaries ``tmp``, ``carry``, ``tmphi`` and ``tmplo``.
        """
        # Clear all destination limbs before accumulating.
        for n in range(CompiledSimulation._limbs(self, dest)):
            write('{dest} = 0;'.format(dest=self._gpu_getdestlimb(dest, n )))
        for p0 in range(CompiledSimulation._limbs(self, args[0])):
            write('carry = 0;')
            arg0 = self._gpu_getarglimb(args[0], p0 )
            for p1 in range(CompiledSimulation._limbs(self, args[1])):
                # Partial products beyond the destination width are dropped.
                if CompiledSimulation._limbs(self, dest) <= p0 + p1:
                    break
                arg1 = self._gpu_getarglimb(args[1], p1 )
                write('gpu_mul128({arg0}, {arg1}, &tmplo, &tmphi);'.format(arg0=arg0, arg1=arg1))
                write('tmp = {dest};'.format(dest=self._gpu_getdestlimb(dest, p0 + p1 )))
                # Each `<` test detects unsigned wraparound of the preceding
                # addition, feeding the carry into the high half.
                write('tmplo += carry; carry = tmplo < carry; tmplo += tmp;')
                write('tmphi += carry + (tmplo < tmp); carry = tmphi;')
                write('{dest} = tmplo{mask};'.format(
                    dest=self._gpu_getdestlimb(dest, p0 + p1 ),
                    mask=CompiledSimulation._makemask(self, dest, args[0].bitwidth + args[1].bitwidth, p0 + p1)))
            # Spill the final carry into the next limb if it still fits.
            if CompiledSimulation._limbs(self, dest) > p0 + CompiledSimulation._limbs(self, args[1]):
                write('{dest} = carry{mask};'.format(
                    dest=self._gpu_getdestlimb(dest, p0 + CompiledSimulation._limbs(self, args[1]) ),
                    mask=CompiledSimulation._makemask(self,
                                                      dest, args[0].bitwidth + args[1].bitwidth,
                                                      p0 + CompiledSimulation._limbs(self, args[1]))))

    def _gpu_build_concat(self, write, op, param, args, dest):
        """Emit statements concatenating *args* (MSB-first) into *dest*.

        Walks the source limbs LSB-first (hence ``reversed(args)``) as a
        stream of (wire, limb, start-bit, size) pieces and packs them into
        consecutive 64-bit destination limbs, splitting a piece when it
        straddles a limb boundary.
        """
        cattotal = sum(x.bitwidth for x in args)
        pieces = (
            (a, lx, 0, min(64, a.bitwidth - 64 * lx))
            for a in reversed(args) for lx in range(CompiledSimulation._limbs(self, a)))
        curr = next(pieces)
        for n in range(CompiledSimulation._limbs(self, dest)):
            res = []
            dpos = 0  # bits filled so far within destination limb n
            while True:
                arg, alimb, astart, asize = curr
                res.append('((((uint64_t){arg}>>{start}))<<{pos})'.format(
                    arg=self._gpu_getarglimb(arg, alimb ), start=astart, pos=dpos))
                dpos += asize
                if dpos >= dest.bitwidth - 64 * n:
                    break  # destination exhausted
                if dpos > 64:
                    # Piece straddles the limb boundary: carry its tail over
                    # into the next destination limb.
                    curr = (arg, alimb, 64 - (dpos - asize), dpos - 64)
                    break
                curr = next(pieces)
                if dpos == 64:
                    break  # limb exactly full; next limb starts a fresh piece
            write('{dest} = ({res}){mask};'.format(
                dest=self._gpu_getdestlimb(dest, n ), res='|'.join(res),
                mask=CompiledSimulation._makemask(self, dest, cattotal, n)))

    def _gpu_build_select(self, write, op, param, args, dest):
        """Emit bit-select statements building each destination limb.

        *param* lists, for every destination bit (LSB first), which bit of
        args[0] it is taken from.
        """
        for n in range(CompiledSimulation._limbs(self, dest)):
            bits = [
                '((1&(((uint64_t){src}>>{sb})))<<{db})'.format(
                    # Read the source limb that actually holds bit b (b // 64).
                    # The original read limb n (the destination limb index) and
                    # computed b // 64 into an unused format kwarg, which was
                    # wrong whenever the source spans multiple limbs.
                    src=self._gpu_getarglimb(args[0], b // 64), sb=(b % 64), db=en)
                for en, b in enumerate(param[64 * n:min(dest.bitwidth, 64 * (n + 1))])]
            write('{dest} = {bits};'.format(
                dest=self._gpu_getdestlimb(dest, n), bits='|'.join(bits)))

    def _gpu_declare_mem_helpers(self, write):
        helpers = '''


typedef struct step_node
{
    int step;
    uint64_t *val;
    struct step_node *next;
} step_node_t;

typedef struct node
{
    uint64_t key;
    struct step_node *step_node;
    struct node *next;
} node_t;

typedef struct hashmap
{
    int size;
    int val_limbs;
    uint64_t *default_value;
    node_t **list;
} hashmap_t;

__device__ hashmap_t *create_hash_map(int size, int val_limbs)
{
    int i;
    hashmap_t *h = (hashmap_t *) malloc(sizeof(hashmap_t));
    h->size = size;
    h->val_limbs = val_limbs;
    h->list = (node_t **) malloc(sizeof(node_t *) * size);
    h->default_value = (uint64_t *) malloc(sizeof(uint64_t) * val_limbs);
    for (i = 0; i < val_limbs; i++)
        h->default_value[i] = 0;
    for (i = 0; i < size; i++)
        h->list[i] = NULL;
    return h;
}

__device__ int hash_code(hashmap_t *h, int key)
{
    return key % h->size;
}

__device__ void insert(hashmap_t *h, uint64_t key, uint64_t val[],uint64_t step)
{
    int pos = hash_code(h, key);
    struct node *list = h->list[pos];
    struct node *new_node = (node_t *) malloc(sizeof(node_t));
    struct step_node *new_step_node = (step_node_t *) malloc(sizeof(step_node_t));
    struct node *temp = list;
    while (temp) //有这个hashcode
    {
        if (temp->key == key) //有这个key了
        {           
            struct step_node *samekey = temp->step_node;
            struct step_node *temp_step = samekey;
            while(temp_step){ 
                if(temp_step->step == step){
                    memcpy(temp_step->val, val, sizeof(uint64_t) * h->val_limbs);
                    return;
                }
                else{
                    new_step_node->step = step;
                    new_step_node->val = (uint64_t *) malloc(sizeof(uint64_t) * h->val_limbs);
                    memcpy(new_step_node->val, val, sizeof(uint64_t) * h->val_limbs);
                    new_step_node->next = samekey;
                    temp->step_node=new_step_node;
                    return;
                }
                temp_step = temp_step->next;
            }
        }        
        temp = temp->next;
    }
    new_step_node->step = step;
    new_step_node->val = (uint64_t *) malloc(sizeof(uint64_t) * h->val_limbs);
    memcpy(new_step_node->val, val, sizeof(uint64_t) * h->val_limbs);
    new_step_node->next = NULL;
    new_node->key = key;
    new_node->step_node = new_step_node;
    new_node->next = list;
    h->list[pos] = new_node;
}

__device__ uint64_t* lookup(hashmap_t *h, uint64_t key, int step)
{
    int pos = hash_code(h, key);
    node_t *list = h->list[pos];
    node_t *temp = list;
    uint64_t *val;
    int max_step=-2;
    while (temp)
    {
        if (temp->key == key)
        {
            step_node_t *samekey = temp->step_node;
            step_node_t *temp_step = samekey;
            while (temp_step){
                if (temp_step->step <= step && temp_step->step > max_step){
                    max_step = temp_step->step;
                    val = temp_step->val;
                }
                temp_step = temp_step->next;
            }
            if(max_step == -2){
                val = h->default_value;
            }
            return val;
        }
        temp = temp->next;
    }
    return h->default_value;
}
            '''
        write(helpers)

    def _gpu_declare_roms(self, write, roms):
        """Emit one initialized __constant__ array per ROM block and record
        its generated name in ``self.gpu_varname``."""
        for mem in roms:
            name = CompiledSimulation._clean_name(self, 'm', mem)
            self.gpu_varname[mem] = [name, 0, name]
            write('__constant__ uint{width}_t {name}[] = {{'.format(
                name=name, width=CompiledSimulation._romwidth(self, mem)))
            # Dump every addressable word of the ROM as one initializer row.
            for addr in range(1 << mem.addrwidth):
                write(CompiledSimulation._makeini(self, mem, mem._get_read_data(addr)) + ',')
            write('};')

    def _gpu_declare_mems(self, write, mems):
        mem_name = []
        for mem in mems:
            # Create hashmap
            vn = CompiledSimulation._clean_name(self, 'm', mem)
            self.gpu_varname[mem] = [vn, 0, vn]
            write('__device__ hashmap_t *{name};'.format(name=vn))
            # mem_name.append('hashmap*'+vn+'[W]')
        next_tmp = 0
        write('EXPORT')
        write('__global__ void initialize_mems() ')
        write('{')
        write('int tid = threadIdx.x;')
        write('switch(tid){')
        for index, mem in enumerate(mems):
            write('case {index}:'.format(index=index))
            write('{')
            typedef = '''

                          typedef struct node
                          {
                              uint64_t key;
                              uint64_t *val;
                              struct node *next;
                          } node_t;

                          typedef struct hashmap
                          {
                              int size;
                              int val_limbs;
                              uint64_t *default_value;
                              node_t **list;
                          } hashmap_t;
                '''
            # write(typedef)
            write('{name} = create_hash_map(256, {limbs});'.format(
                name=self.gpu_varname[mem][0], limbs=CompiledSimulation._limbs(self, mem)
            ))
            if mem in self._memmap:
                # Insert default values
                for k, v in self._memmap[mem].items():
                    write('uint64_t t{n}[] = {val};'.format(
                        n=next_tmp, val=CompiledSimulation._makeini(self, mem, v)))
                    write('insert({name}, {key}, t{n},-1);'.format(
                        name=self.gpu_varname[mem][0], key=k, n=next_tmp
                    ))
                    next_tmp += 1
            write('break;}')
        write('default:break;}')
        write('}')

    def _declare_gpu_wv(self, write, w, type):
        """Declare per-wire storage in the generated kernel.

        Constants become initialized local arrays; registers and I/O wires
        are declared elsewhere and skipped here; every remaining wire gets a
        per-thread __shared__ limb array of the given C *type*.
        """
        if isinstance(w, Const):
            write('{type} {name}[{limbs}] = {val};'.format(type=type,
                                                           limbs=CompiledSimulation._limbs(self, w),
                                                           name=self.gpu_varname[w][0],
                                                           val=CompiledSimulation._makeini(self, w, w.val)))
        elif isinstance(w, (Register, Input, Output)):
            # Declared in dedicated global buffers, not here.  (The original
            # chained the isinstance checks with non-short-circuiting `|`.)
            pass
        else:
            write('__shared__ {type} {name}[{limbs}][tnum];'.format(type=type, limbs=CompiledSimulation._limbs(self, w),
                                                                    name=self.gpu_varname[w][0]))

    def replace_wire(self, orig_wire, new_src, new_dst, src_nets, dst_nets, indexToNet, block):
        """Rewire *orig_wire* inside *block*: *new_src* replaces it as a net
        destination, *new_dst* replaces it as a net argument.

        *src_nets* maps a wire to the index of the net driving it and
        *dst_nets* to the indices of the nets consuming it; *indexToNet* is
        updated in place so the index mapping stays consistent with the
        rebuilt nets.  The original wire is dropped from the block once it
        has been replaced on both sides.
        """
        if new_src is not orig_wire and orig_wire in src_nets:
            # Rebuild the driving net with new_src substituted in its dests.
            idx = src_nets[orig_wire]
            old_net = indexToNet[idx]
            rebuilt = LogicNet(
                op=old_net.op, op_param=old_net.op_param, args=old_net.args,
                dests=tuple(new_src if w is orig_wire else w for w in old_net.dests))
            block.logic.remove(old_net)
            block.add_net(rebuilt)
            indexToNet[idx] = rebuilt

        if new_dst is not orig_wire and orig_wire in dst_nets:
            # Rebuild every consuming net with new_dst substituted in its args.
            for idx in dst_nets[orig_wire]:
                old_net = indexToNet[idx]
                rebuilt = LogicNet(
                    op=old_net.op, op_param=old_net.op_param, dests=old_net.dests,
                    args=tuple(new_dst if w is orig_wire else w for w in old_net.args))
                block.logic.remove(old_net)
                block.add_net(rebuilt)
                indexToNet[idx] = rebuilt

        # Only drop the wire once it was replaced on both sides.
        if new_dst is not orig_wire and new_src is not orig_wire:
            block.remove_wirevector(orig_wire)

    def make_macro_block(self, id, net):
        # print('macro~~~~~~~~~~~~~~~~~~~~~~~~~~~~')

        macro_args_name = []
        macro_dests_name = []
        for arg in net.args:
            macro_args_name.append(arg.name)
        # print(macro_args_name)
        for dest in net.dests:
            macro_dests_name.append(dest.name)
        # print(macro_dests_name)
        ##把macro作为一个block
        loop_block = Block()
        loop_block_indexToNet = {}
        set_working_block(loop_block)
        reg = set()
        inter_reg = set()
        in_wire = set()
        out_wire = set()
        inter_wv = {}
        rename_dest = {}
        replace_wv = []
        replace_net = self.loop_blocks_outputs[id]

        for node in net.logicnet:
            args = []
            dests = []
            macro_logicnet = self.indexTonet[node]
            #print(macro_logicnet)

            for macro_wv in macro_logicnet.args:
                # print(macro_wv)
                # 输入有wv,input,const组成
                if macro_wv.name in macro_args_name and not isinstance(macro_wv,
                                                                       Const):  # and not isinstance(macro_wv, Register)
                    # reg,wv和input都定义成input类
                    if macro_wv.name in in_wire:
                        wv = loop_block.get_wirevector_by_name(macro_wv.name)
                    else:
                        wv = Input(bitwidth=macro_wv.bitwidth, name=macro_wv.name, block=loop_block)
                        in_wire.add(macro_wv.name)
                elif macro_wv.name in macro_args_name and isinstance(macro_wv, Const):
                    if macro_wv.name in in_wire:
                        wv = loop_block.get_wirevector_by_name(macro_wv.name)
                    else:
                        wv = Const(val=macro_wv.val, name=macro_wv.name, bitwidth=macro_wv.bitwidth,
                                   block=loop_block)
                        in_wire.add(macro_wv.name)
                # elif macro_wv.name in macro_args_name and isinstance(macro_wv, Register):
                #     if macro_wv.name in reg:
                #         wv = loop_block.get_wirevector_by_name(macro_wv.name)
                #     else:
                #         wv = Register(bitwidth=macro_wv.bitwidth,
                #                       name=macro_wv.name,
                #                       reset_value=macro_wv.reset_value,
                #                       block=loop_block)
                #         in_wire.add(macro_wv.name)
                #         reg.add(macro_wv.name)
                else:  # 内部的线,wv/reg
                    if macro_wv.name in inter_wv.keys():
                        wv = loop_block.get_wirevector_by_name(inter_wv[macro_wv.name])
                    else:
                        if isinstance(macro_wv, Register):
                            wv = Register(bitwidth=macro_wv.bitwidth,
                                          name=macro_wv.name,
                                          reset_value=macro_wv.reset_value,
                                          block=loop_block)
                            reg.add(macro_wv.name)
                            inter_reg.add(macro_wv.name)
                        else:
                            wv = WireVector(bitwidth=macro_wv.bitwidth, block=loop_block)
                inter_wv[macro_wv.name] = wv.name
                # print(wv)
                loop_block.add_wirevector(wv)
                args.append(wv)
            for macro_wv in macro_logicnet.dests:
                # print(macro_logicnet)
                # 输出有outpu,wv,register
                if macro_wv.name in macro_dests_name and not isinstance(macro_wv, Register):
                    if isinstance(macro_wv, Output):
                        if macro_wv.name in out_wire:
                            wv = loop_block.get_wirevector_by_name(macro_wv.name)
                        else:
                            wv = Output(bitwidth=macro_wv.bitwidth, name=macro_wv.name, block=loop_block)
                            out_wire.add(macro_wv.name)
                    else:
                        # print('有输出的：' + str(macro_logicnet))
                        # print(macro_wv)
                        if macro_wv.name in out_wire:
                            wv = loop_block.get_wirevector_by_name(inter_wv[macro_wv.name])
                        else:  # 输出是wv类型的
                            if node not in replace_net.keys():
                                wv = Output(bitwidth=macro_wv.bitwidth, name=macro_wv.name, block=loop_block)
                            else:
                                if macro_wv.name.startswith('tmp'):
                                    wv = WireVector(bitwidth=macro_wv.bitwidth,name='_'+macro_wv.name, block=loop_block)
                                else:
                                    wv = WireVector(bitwidth=macro_wv.bitwidth, name=macro_wv.name,
                                                    block=loop_block)
                                #rename_dest[macro_wv.name] = wv
                            out_wire.add(macro_wv.name)
                            # for i in rename_dest.items():
                            #     print(str(i[0]) + ":" + str(i[1]))
                elif macro_wv.name in macro_dests_name and isinstance(macro_wv, Register):
                    if macro_wv.name in reg:
                        wv = loop_block.get_wirevector_by_name(macro_wv.name)
                    else:
                        if macro_wv.name in self.macro_out_reg:
                            wv = Register(bitwidth=macro_wv.bitwidth,
                                          name='_tmp_tx_' + macro_wv.name,
                                          reset_value=macro_wv.reset_value,
                                          block=loop_block)
                            macro_outwv = Output(bitwidth=wv.bitwidth,
                                                 name=macro_wv.name, block=loop_block)
                            # loop_block.add_wirevector(macro_outwv)
                            macro_outwv <<= wv
                        else:
                            wv = Register(bitwidth=macro_wv.bitwidth,
                                          name=macro_wv.name,
                                          reset_value=macro_wv.reset_value,
                                          block=loop_block)
                        out_wire.add(macro_wv.name)
                        reg.add(macro_wv.name)
                else:  # 内部的线
                    if macro_wv.name in inter_wv.keys():
                        wv = loop_block.get_wirevector_by_name(inter_wv[macro_wv.name])
                    else:
                        if isinstance(macro_wv, Register):
                            if macro_wv.name in reg:
                                wv = loop_block.get_wirevector_by_name(inter_wv[macro_wv.name])
                            else:
                                wv = Register(bitwidth=macro_wv.bitwidth,
                                              name=macro_wv.name,
                                              reset_value=macro_wv.reset_value,
                                              block=loop_block)
                                reg.add(wv)
                                inter_reg.add(wv)
                        else:
                            if macro_wv.name.startswith('tmp'):
                                wv = WireVector(bitwidth=macro_wv.bitwidth,name='_'+macro_wv.name,block=loop_block)
                            else:
                                wv = WireVector(bitwidth=macro_wv.bitwidth, name=macro_wv.name, block=loop_block)
                inter_wv[macro_wv.name] = wv.name

                # print(wv)
                loop_block.add_wirevector(wv)
                dests.append(wv)

            op = macro_logicnet.op
            op_param = macro_logicnet.op_param
            macro_block_logicnet = LogicNet(op=op, op_param=op_param, args=tuple(args), dests=tuple(dests))
            # print(macro_block_logicnet)
            loop_block.add_net(macro_block_logicnet)
            loop_block_indexToNet[node] = macro_block_logicnet
        
        self.loops_inter_reg.update(inter_reg)

        #print('macro_old##############')
        #print(loop_block)
        # print('#########################')
        #set_working_block(loop_block)
        for src_net, dest_nets in replace_net.items():
            macro_logicnet = loop_block_indexToNet[src_net]
            #print(macro_logicnet)
            wname = macro_logicnet.dests[0].name
            org_wv = loop_block.get_wirevector_by_name(wname)
            wbit = org_wv.bitwidth
            if isinstance(org_wv, Register):
                re_wv = Register(bitwidth=wbit, name='_tmp_tx_' + wname,
                                 block=loop_block)
            else:
                re_wv = WireVector(bitwidth=wbit, name='_tmp_tx_' + wname,
                                   block=loop_block)
            inter_wv[get_key(inter_wv,wname)[0]] = re_wv.name
            loop_block.add_wirevector(re_wv)
            #print(re_wv)
            self.replace_wire(org_wv, re_wv, re_wv, {org_wv: src_net},
                              {org_wv: dest_nets},
                              loop_block_indexToNet,
                              loop_block)

            macro_outwv = Output(bitwidth=wbit,
                                 name=get_key(inter_wv,re_wv.name)[0], block=loop_block)

            loop_block.add_wirevector(macro_outwv)
            macro_outwv <<= re_wv

        #set_working_block(self.block)
        # print(reg)
        # print("macro_output~~~~~~~~~~~~~~~~~~~~")
        # print(net)
        # for i in rename_dest.items():
        #     print(str(i[0])+":"+str(i[1]))
        #self.macro_output[id] = rename_dest
        self.loops_block[id]=loop_block
        self.loops_reg[id] = loop_block.wirevector_subset(Register)
        r_n=0
        for r in self.loops_reg.values():
            r_n += len(r)
        print('loop inter reg number:'+str(r_n))
        #print('macro_new~~~~~~~~~~~~~~~~~')
        #print(loop_block)
        #print(inter_wv)
        return inter_wv

    def ess_macro_pretreat(self, loop_block):
        """Partition *loop_block* into acyclic condition parts and record the
        dependence information needed for activity-flag driven simulation.

        Populates on ``self``: ``stmtsOrdered``, ``register_dependence``,
        ``memory_dependence``, ``outputsToDeclare``,
        ``outputsToDeclare_dependence`` and ``outputsToDeclare_information``.
        """
        sg = StatementGraph(loop_block)
        # Nodes outside the graph's valid set are excluded from partitioning.
        excluded = [n for n in range(len(sg.inNeigh)) if n not in sg.validNodes]
        partitioner = AcyclicPart(sg, distinct(excluded))
        partitioner.partition(40)  # target partition size

        convertIntoCPStmts(sg, partitioner, excluded)
        self.stmtsOrdered = sg.stmtsOrdered()

        # Users of each register, so flags can be reset after a register update.
        self.register_dependence = dict()
        # Users of each memory, so flags can be reset after a memory write.
        self.memory_dependence = dict()
        # Users of each declared output, so flags can be reset once it is assigned.
        self.outputsToDeclare_dependence = dict()
        self.outputsToDeclare = []
        self.outputsToDeclare_information = dict()

        # First pass: collect register/memory consumers and declared outputs.
        for part in self.stmtsOrdered:
            part_id = part.id
            for stmt in part.memberStmts:
                for src in stmt.args:
                    if isinstance(src, Register):
                        users = self.register_dependence.setdefault(src.name, [])
                        if part_id not in users:
                            users.append(part_id)
                if stmt.op == 'm':
                    users = self.memory_dependence.setdefault(stmt.op_param[1].name, [])
                    if part_id not in users:
                        users.append(part_id)
            for out in part.outputsToDeclare:
                self.outputsToDeclare_dependence[out] = []
                self.outputsToDeclare.append(out)

        # Second pass: record which parts consume an output declared elsewhere.
        for part in self.stmtsOrdered:
            part_id = part.id
            for stmt in part.memberStmts:
                for src in stmt.args:
                    if src.name in self.outputsToDeclare and src.name not in part.outputsToDeclare:
                        self.outputsToDeclare_information[src.name] = src
                        if part_id not in self.outputsToDeclare_dependence[src.name]:
                            self.outputsToDeclare_dependence[src.name].append(part_id)


    def _declare_macro_wv_ess(self, write, loop_block, id):
        """Emit C declarations for every wire, register and memory used by the
        macro block *loop_block*, initialising registers from their GPU-side
        default buffers when the loop runs on the GPU.

        :param write: callable that appends one line of generated C code
        :param loop_block: PyRTL Block holding the macro's logic
        :param id: loop identifier used for cross-block wire-name lookup
        """
        write('uint64_t tmp, carry, tmphi, tmplo=0;')  # scratch temporaries
        # Declare constants, registers, cross-partition and intermediate signals.
        for w in loop_block.wirevector_set:
            ESSENT._declare_wire(self, write, w, self._val_type(w))
            if isinstance(w, Register) and self.loop_in_gpu(id):
                # Map the macro-local register name back to the global wire,
                # then load both the local and GPU copies from the default value.
                gw = self.block.get_wirevector_by_name(get_key(self.loops_inter_wv[id], w.name)[0])
                for limb in range(ESSENT._limbs(self, w)):
                    write('{wname}[{limb}]={gpu_wname}_default[{limb}][0];'.format(
                        wname=self.varname[w], limb=limb,
                        gpu_wname=self.gpu_varname[gw][0]))
                    write('{gpu_wname}[{limb}][0]={gpu_wname}_default[{limb}][0];'.format(
                        gpu_wname=self.gpu_varname[gw][0],
                        limb=limb))

        mems = {net.op_param[1] for net in loop_block.logic_subset('m@')}
        if mems:
            # Validate the user-supplied memory_value_map against the memories
            # this block actually references.
            for key in self._memmap:
                if key not in mems:
                    raise PyrtlError('unrecognized MemBlock in memory_value_map')
                if isinstance(key, RomBlock):
                    raise PyrtlError('RomBlock in memory_value_map')
        roms = {mem for mem in mems if isinstance(mem, RomBlock)}
        for mem in roms:
            self.varname[mem] = vn = ESSENT._clean_name(self, 'm', mem)
            # (dead 'limbs=' format argument removed: the template never used it)
            write(' uint{width}_t *{name}; '.format(
                name=vn, width=self._romwidth(mem)))
            write('{name} = {gpu_name};'.format(name=vn, gpu_name=self.gpu_varname[mem][0]))
        mems = {mem for mem in mems if isinstance(mem, MemBlock) and not isinstance(mem, RomBlock)}
        for mem in mems:
            self.varname[mem] = vn = ESSENT._clean_name(self, 'm', mem)
            write('hashmap_t *{name};'.format(name=vn))
            write('{name} = {gpu_name};'.format(name=vn, gpu_name=self.gpu_varname[mem][0]))

    def _connect_logicnet_with_macro_in_ess(self, write, loop_block, net, layer, i):
        """Emit the C statements that copy *net*'s input values into the
        macro-local wires of *loop_block* before the macro body runs.

        :param write: callable that appends one line of generated C code
        :param loop_block: PyRTL Block holding the macro's logic
        :param net: merged macro net whose args are being connected
        :param layer: pipeline layer index of this macro
        :param i: index of the macro within its layer
        """
        # Primary inputs of the macro.
        macro_args_name = []
        for arg in net.args:
            macro_args_name.append(arg.name)
            if isinstance(arg, Input):
                for n in range(ESSENT._limbs(self, arg)):
                    write('{wname}[{n}]={gpu_wname}[{n}*W+j];'.format(
                        n=n,
                        wname=self.varname[loop_block.get_wirevector_by_name(arg.name)],
                        gpu_wname=self.gpu_varname[arg][0]))
            # Non-Input args come from the loop-local buffer; when the loop runs
            # on the GPU only declared loop outputs are forwarded.  (The two
            # original branches emitted identical code — collapsed here.)
            elif not self.loop_in_gpu(i) or arg.name in self.loops_output:
                for n in range(ESSENT._limbs(self, arg)):
                    write('{wname}[{n}]=loop_{gpu_wname}[{n}*W+j];'.format(
                        n=n,
                        wname=self.varname[loop_block.get_wirevector_by_name(arg.name)],
                        gpu_wname=self.gpu_varname[arg][0]))

        if not self.loop_in_gpu(i):
            in_wire = set()  # names already connected (was misspelled 'in_wrie')
            # Inputs produced as mid-outputs of predecessor nodes: bind each
            # wire to the matching midout / register buffer.
            source_node = self.g.predecessors(self.new_layer_id[layer][i]).tolist()
            for node in source_node:
                for pin in self.mid_outpin[node]:
                    w = self.block.wirevector_by_name[pin[2]]
                    if pin[2] in macro_args_name and pin[2] not in in_wire:
                        in_wire.add(pin[2])
                        if pin[0] >= 0:
                            for n in range(ESSENT._limbs(self, w)):
                                write('{vname}[{n}]={gpu_wname}{flag}[j];'.format(
                                    vname=self.varname[self.macro_block.get_wirevector_by_name(w.name)],
                                    gpu_wname=self.gpu_varname[w][0], flag=pin[0] + n, n=n))
                        else:
                            # Register and non-register sources use the same
                            # per-limb layout (original duplicate branches merged;
                            # dead 'type='/'i=' format arguments dropped).
                            for n in range(ESSENT._limbs(self, w)):
                                write('{vname}[{n}]={gpu_wname}[{n}][j];'.format(
                                    vname=self.varname[self.macro_block.get_wirevector_by_name(w.name)],
                                    gpu_wname=self.gpu_varname[w][0], n=n))

    def _connect_logicnet_with_macro_out_ess(self, write, loop_block, id, net, layer):
        """Emit the C statements that copy the macro's results out of
        *loop_block* after the macro body has run.

        :param write: callable that appends one line of generated C code
        :param loop_block: PyRTL Block holding the macro's logic
        :param id: loop identifier used for cross-block wire-name lookup
        :param net: merged macro net whose dests are being connected
        :param layer: pipeline layer index (currently unused for output wiring;
            kept for signature symmetry with the input side)
        """
        # Macro registers: when the loop runs on the GPU, the last lane of the
        # tile (j==W-1) writes each register back to its default buffer.
        if self.loop_in_gpu(id):
            for reg in loop_block.wirevector_subset(Register):
                regname = reg.name
                if regname in self.loops_inter_wv[id].values():
                    # Translate the macro-local name back to the global name.
                    regname = get_key(self.loops_inter_wv[id], regname)[0]
                reg = self.block.get_wirevector_by_name(regname)
                write('if(j==W-1){')
                for n in range(ESSENT._limbs(self, reg)):
                    write(self.gpu_varname[reg][0] + '_default[' + str(n) + '][0]=' +
                          self.varname[loop_block.get_wirevector_by_name(regname)] + '[' + str(n) + '];')
                write('}')
        # else: CPU-side loop registers are persisted elsewhere — nothing to emit.

        for dest in net.dests:
            if isinstance(dest, Output):
                for n in range(ESSENT._limbs(self, dest)):
                    write('{gname}[{limb}*W+j]={name}[{limb}];'.format(
                        gname=self.gpu_varname[dest][0],
                        name=self.varname[loop_block.get_wirevector_by_name(dest.name)],
                        limb=n))
            elif self.loop_in_gpu(id):
                # Only declared loop outputs are forwarded for GPU loops.
                # (Condition hoisted out of the limb loop: it is invariant.)
                if dest.name in self.loops_output:
                    for n in range(ESSENT._limbs(self, dest)):
                        write('{gname}[{limb}][j]={name}[{limb}];'.format(
                            gname=self.gpu_varname[dest][0],
                            name=self.varname[loop_block.get_wirevector_by_name(dest.name)],
                            limb=n))
            else:
                for n in range(ESSENT._limbs(self, dest)):
                    write('loop_{gname}[{limb}*W+j]={name}[{limb}];'.format(
                        gname=self.gpu_varname[dest][0],
                        name=self.varname[loop_block.get_wirevector_by_name(dest.name)],
                        limb=n))

    def write_macro_simbody_ess(self, write, loop_block):
        """Emit the simulation body for one macro block: one code section per
        ordered condition partition, followed by end-of-cycle register commits
        and deferred memory writes.

        :param write: callable that appends one line of generated C code
        :param loop_block: PyRTL Block holding the macro's logic
        """
        macro_reg = loop_block.wirevector_subset(Register)
        # Memory writes deferred to the end of the cycle (filled by builders).
        self.memory_update = []
        # Input signals of each CondPart — may be external or internal.
        self.condPart_inputs = []

        # Emit the body of every partition in evaluation order.  The original
        # code duplicated this call in two branches differing only in the
        # indent-level constant (3 with flag optimization, 2 without).
        for cp in self.stmtsOrdered:
            if isinstance(cp, CondPart):
                level = 3 if self.optimization in [2, 3] else 2
                keepAvail = list(cp.outputsToDeclare)
                ESSENT.writeBodyInner(self, StatementGraph(cp.memberStmts), True,
                                      write, level, cp.id, keepAvail)
                write('')

        if self.optimization in [2, 3]:
            # Commit each register's $new shadow value at the end of the cycle.
            for register in macro_reg:
                for n in range(ESSENT._limbs(self, register)):
                    write(2 * '  ' + '{vn}[{n}] = {vn}$new[{n}];'.format(
                        vn=self.varname[loop_block.get_wirevector_by_name(register.name)],
                        n=n))

            write('')

            # Apply the memory writes deferred during partition emission.
            for memory_update in self.memory_update:
                ESSENT._build_memory_write(self, write, memory_update.op,
                                           memory_update.op_param,
                                           memory_update.args,
                                           memory_update.dests, 2)

    def _declare_mergelogicnet_wv(self, write, net):
        """Emit declarations for every wire internal to a merged logic net.

        Registers additionally get per-block ``__shared__`` staging buffers.

        :param write: callable that appends one line of generated CUDA code
        :param net: merged net with ``.args``, ``.dests`` and ``.logicnet``
            (the ids of the member LogicNets)
        :return: list of the Register wires declared inside the net
        """
        reg_name = []
        self.merge_inter_wv = []
        declared_tmp = False  # scratch temporaries are emitted at most once

        # Names that must NOT be declared here: all args, plus non-register
        # args/dests (register dests still need a local declaration below).
        wname = [w.name for w in net.args]
        wname.extend(w.name for w in net.args + net.dests
                     if not isinstance(w, Register))

        for node in net.logicnet:
            n = self.indexTonet[node]
            # Arithmetic ops need the shared scratch variables exactly once.
            if not declared_tmp and any(op in '+-*' for op in n.op):
                declared_tmp = True
                write('uint64_t tmp, carry, tmphi, tmplo=0;')

            for w in n.args + n.dests:
                if w.name in wname:
                    continue
                wname.append(w.name)
                self._declare_gpu_wv(write, w, self._val_type(w))
                # was `isinstance(...) | isinstance(...)` — bitwise `|` replaced
                # with the logical `or` it was meant to be
                if isinstance(w, Input) or isinstance(w, Output):
                    pass
                elif isinstance(w, Register):
                    write('__shared__ {type} shared_{vn}[{limbs}][tnum+1];'.format(
                        type=self._val_type(w), vn=self.gpu_varname[w][2],
                        limbs=ESSENT._limbs(self, w)))
                    write('__shared__ {type} shared_{vn}_default[{limbs}][1];'.format(
                        type=self._val_type(w), vn=self.gpu_varname[w][2],
                        limbs=ESSENT._limbs(self, w)))
                    reg_name.append(w)
                    self.merge_inter_wv.append(w.name)
                else:
                    self.merge_inter_wv.append(w.name)
        return reg_name

    def _write_logicnet(self, write, net):
        """Emit the CUDA C statements simulating a single LogicNet.

        Combinational ops dispatch to the matching ``_gpu_build_*`` builder;
        memory writes (op ``@``) and register updates (op ``r``) are emitted
        inline below.

        :param write: callable that appends one line of generated code
        :param net: the LogicNet to translate
        """
        # combinational logic
        op_builders = {
            'm': self._gpu_build_memread,
            'w': self._gpu_build_wire,
            '~': self._gpu_build_not,
            '&': self._gpu_build_bitwise,
            '|': self._gpu_build_bitwise,
            '^': self._gpu_build_bitwise,
            'n': self._gpu_build_nand,  #######
            '=': self._gpu_build_eq,
            '<': self._gpu_build_cmp,
            '>': self._gpu_build_cmp,
            '+': self._gpu_build_add,
            '-': self._gpu_build_sub,
            '*': self._gpu_build_mul,  ###
            'c': self._gpu_build_concat,
            's': self._gpu_build_select,
            'x': self._gpu_build_mux,
        }



        if net.op not in 'r@':
            # Combinational net: emit a source comment, then dispatch.
            op, param, args, dest = net.op, net.op_param, net.args, net.dests[0]
            write('// net {op} : {arg} -> {dest0}'.format(
                op=op, arg=', '.join(str(self.gpu_varname[x][0]) for x in args), dest0=self.gpu_varname[dest][0]))

            op_builders[op](write, op, param, args, dest )

            #write('__threadfence_block();')
            #write('__syncthreads();')

            # memory writes
        elif net.op == '@':
            # Memory write: only thread 0 runs the serialized j-loop.
            # net.args are (address, data, enable).
            write('if(tid==0){')
            write('for(int j=0;j<W;j++){')
            write('//@')
            mem = net.op_param[1]

            write('if ({enable}) {{'.format(enable=self._gpu_for_getarglimb(net.args[2], 0)))
            write('uint64_t val[{limb}];'.format(limb=self._limbs(net.args[0])))

            for n in range(self._limbs(net.args[1])):
                write('val[{n}] = {vn};'.format(vn=self._gpu_for_getarglimb(net.args[1], n), n=n))

            write('insert({mem},{addr}, val,i*W+j);'.format(
                mem=self.gpu_varname[mem][0],
                addr=self._gpu_for_getarglimb(net.args[0], 0)
            ))

            write('}}}')
            write('__syncthreads();')
            # register updates
        else:
            write('//r')
            rin = net.args[0]
            rout = net.dests[0]
            for n in range(CompiledSimulation._limbs(self, rin)):
                # write('if(it==0){')
                # write('{rout} = {rout_name}[{n}][W];'.format(rout=self._gpu_getarglimb(rout, n ),rout_name=self.gpu_varname[rout][0], n=n))
                # write('}')
                # Each thread stages its next-state value at [tid+1]; index 0
                # is then seeded from the default buffer so thread t effectively
                # reads thread t-1's value, rotating the register along the tile.
                write(
                    'shared_{rout_name}[{n}][tid+1] = {vn};'.format(rout_name=self.gpu_varname[rout][2],
                                          n=n, vn=self._gpu_getarglimb(rin, n)))
                write('if(tid==0){')
                # NOTE(review): the _default buffer is indexed [0][0] here but
                # [n][0] in the declaration site elsewhere in this file — this
                # looks wrong for multi-limb registers; confirm intent.
                write(
                    'shared_{rout_name}[{n}][0] = shared_{rout_name}_default[0][0];'.format(rout_name=self.gpu_varname[rout][2],
                                            n=n))
                write(
                    'shared_{rout_name}_default[0][0] = shared_{rout_name}[{n}][tnum];'.format(
                        rout_name=self.gpu_varname[rout][2],
                        n=n))
                write('}')
                write('__syncthreads();')
                write('{rout_name}[{n}][it*tnum+tid]= shared_{rout_name}[{n}][tid];'.format(
                    rout_name=self.gpu_varname[rout][2], n=n))
                #write('__threadfence_block();')
                #write('__syncthreads();')

    def writesimbody(self, write, layer ):
        #const_wv = []
        reg_wv = []

        if layer % 2:
            mid_in_flag = '_0_'
            mid_out_flag = '_1_'
        else:
            mid_in_flag = '_1_'
            mid_out_flag = '_0_'

        write("int blockid = blockIdx.x;")

        write('switch(blockid%{net_num})'.format(net_num=len(self.netSequence[layer])))  #
        write('{')
        for i, n in enumerate(self.netSequence[layer]):  #
            net = n[0]
            write('case {i}:'.format(i=i))
            write('{ int tid = threadIdx.x;')
            #write('int pos=blockid/{net_num};'.format(net_num=len(self.netSequence[layer])))
            if layer > 0:  # 对应中间输入，lk,reg,in,const等同名不用管
                source_node = self.g.predecessors(n[1]).tolist()
                for node in source_node:
                    # print(node)
                    source_out_item = self.mid_outpin[node]
                    # print(source_out_item)
                    for pin in source_out_item:
                        if pin[0] >= 0:
                            w = self.block.wirevector_by_name[pin[2]]
                            self.gpu_varname[w][0] = mid_in_flag + 'midout_' + pin[1]  # limb>1时，记录的是起点
                            self.gpu_varname[w][1] = pin[0]


            if layer < self.layercount:  # 对应中间输出，lk,reg,in,const等同名不用管
                out_item = self.mid_outpin[n[1]]
                # print(out_item)
                for pin in out_item:
                    if pin[0] >= 0:
                        w = self.block.wirevector_by_name[pin[2]]
                        self.gpu_varname[w][0] = mid_out_flag + 'midout_' + pin[1]
                        self.gpu_varname[w][1] = pin[0]  # limb>1时，记录起点
                        # val_type = self._val_type(w)
                        # self._declare_wv(write, w, val_type)
                        # for n in range(CompiledSimulation._limbs(self, w)):
                        #     write('*(' + mid_out_flag + 'midout_' + val_type + str(pin[0] + n) + '+i*W+tid)=' +
                        #           self.gpu_varname[w] + '[' + str(n) + '*W+tid];')
            if isinstance(net, LogicNet):
                for arg in set(net.args):
                    if isinstance(arg, Const):
                        self._declare_gpu_wv(write, arg, self._val_type(arg))
                if net.op in '+-':
                    write('uint64_t tmp, carry;')
                elif net.op == '*':
                    write('uint64_t tmp, carry, tmphi, tmplo=0;')
                else:
                    pass
                reg_default_str=''

                if net.op=='r':
                    rout=net.dests[0]
                    write('__shared__ {type} shared_{vn}[{limbs}][tnum+1];'.format(type=self._val_type(rout),
                                                                                   vn=self.gpu_varname[rout][2],
                                                                                   limbs=ESSENT._limbs(self, rout)))
                    write('__shared__ {type} shared_{vn}_default[{limbs}][1];'.format(type=self._val_type(rout),
                                                                                         vn=self.gpu_varname[rout][2],
                                                                                         limbs=ESSENT._limbs(self,
                                                                                                             rout)))
                    write('if(tid==0){')
                    for n in range(ESSENT._limbs(self,net.dests[0])):

                        write('shared_{rn}_default[{n}][0] = {rn}_default[{n}][0];'.format(
                            rn=self.gpu_varname[net.dests[0]][2],n=n))
                        reg_default_str = '{rn}_default[{n}][0] = shared_{rn}_default[{n}][0];'.format(
                            rn=self.gpu_varname[net.dests[0]][2],n=n)
                    write('}')
                    write('__syncthreads();')
                write('for(int it=0;it<W/tnum;it++)')
                write('{')
                self._write_logicnet(write, net)
                #write('__syncthreads();')
                write('}')
                write('if(tid==0){')
                write(reg_default_str)
                write('}')
                write('__syncthreads();')


            else:
                id = self.re_layer[layer][i][0]
                if id in self.macro_loop_index:
                    # print(net)
                    # write('marco')
                    # print(layer)
                    write('if(tid==0){')
                    # self._declare_macro_wv_com(write, net)
                    # self._connect_logicnet_with_macro_com(write, net, layer, i)
                    # self.write_macro_simbody_com(write, net)
                    loop_block = self.loops_block[id]
                    self.ess_macro_pretreat(loop_block)
                    self._declare_macro_wv_ess(write,loop_block,id)
                    # # 声明定义Flag
                    # if self.optimization in [2, 3]:
                    #     write('  ' + 'int Flag_same_inputs[{n}]='.format(n=len(self.stmtsOrdered)) + '{' + '0,' * len(
                    #         self.stmtsOrdered) + '};')
                    #     write(' ')
                    write('for(int j=0;j<W;j++){')
                    self._connect_logicnet_with_macro_in_ess(write, loop_block, net, layer, i)
                    self.write_macro_simbody_ess(write, loop_block)
                    self._connect_logicnet_with_macro_out_ess(write, loop_block,id, net, layer)
                    write('}')
                    write('}')
                    write('__syncthreads();')
                else:
                    for arg in net.args:
                        if isinstance(arg, Const):
                            self._declare_gpu_wv(write, arg, self._val_type(arg))
                    reg_name = self._declare_mergelogicnet_wv(write, net)
                    reg_default_str = ''
                    if reg_name:
                        write('if(tid==0){')
                        for r in reg_name:
                            for n in range(ESSENT._limbs(self, r)):
                                write('shared_{rn}_default[{n}][0] = {rn}_default[{n}][0];'.format(
                                    rn=self.gpu_varname[r][2], n=n))
                                reg_default_str += '{rn}_default[{n}][0] = shared_{rn}_default[{n}][0];'.format(
                                    rn=self.gpu_varname[r][2], n=n)
                        write('}')
                        write('__syncthreads();')
                    write('for(int it=0;it<W/tnum;it++)')
                    write('{')
                    for node in net.logicnet:
                        self._write_logicnet(write, self.indexTonet[node])
                    # write('__syncthreads();')
                    write('}')
                    write('if(tid==0){')
                    write(reg_default_str)
                    write('}')
                    write('__syncthreads();')
                    self.merge_inter_wv = []

            write('break;')
            write('}')

        write('default:break;')
        write('}')

    def loop_declare_str(self, net):
        """Build the C parameter-declaration strings for one loop function.

        For every input/output wire of *net* this produces a declaration of
        the form ``uint64_t loop_<name>[<limbs>*W]``; wires that are plain
        ``Input``/``Output`` keep their GPU variable name without the
        ``loop_`` prefix. Duplicates are dropped while preserving first-seen
        order, so the declaration list stays aligned with the call-site
        argument list built elsewhere.

        :param net: merged loop LogicNet whose ``args``/``dests`` are declared
        :return: list of declaration strings
        """
        declare_in_str = []
        for arg in net.args:
            if isinstance(arg, Input):
                decl = 'uint64_t ' + self.gpu_varname[arg][0] + '[' + str(
                    ESSENT._limbs(self, arg)) + '*W]'
            else:
                decl = 'uint64_t loop_' + self.gpu_varname[arg][0] + '[' + str(
                    ESSENT._limbs(self, arg)) + '*W]'
            if decl not in declare_in_str:
                declare_in_str.append(decl)

        for dest in net.dests:
            # BUG FIX: the Output branch previously built its string from the
            # stale loop variable `arg` and then tested `declarename2`, which
            # was unbound when the first dest was an Output (NameError) and
            # silently dropped Output declarations otherwise.
            if isinstance(dest, Output):
                decl = 'uint64_t ' + self.gpu_varname[dest][0] + '[' + str(
                    ESSENT._limbs(self, dest)) + '*W]'
            else:
                decl = 'uint64_t loop_' + self.gpu_varname[dest][0] + '[' + str(
                    ESSENT._limbs(self, dest)) + '*W]'
            if decl not in declare_in_str:
                declare_in_str.append(decl)

        return declare_in_str

    def loops_in_out(self):
        """Compute the packed buffer layout for every loop's I/O wires.

        :return: ``{loop_index: [{wire: [offset, limbs]}, total_limbs]}``
            where ``offset`` is the wire's starting limb inside the loop's
            shared host/device buffer and ``total_limbs`` is the overall
            buffer length in limbs.
        """
        layout = {}
        for index, loop_net in self.loop_indexTonet.items():
            positions = {}
            cursor = 0
            # Inputs are packed first, then outputs; each wire occupies one
            # contiguous run of `limbs` slots.
            for wv in set(loop_net.args):
                width = ESSENT._limbs(self, wv)
                positions[wv] = [cursor, width]
                cursor += width
            for wv in set(loop_net.dests):
                width = ESSENT._limbs(self, wv)
                positions[wv] = [cursor, width]
                cursor += width
            layout[index] = [positions, cursor]
        return layout


    def loop_call_str(self,index):
        net = self.loop_indexTonet[index]
        w_pos = self.loops_in_out_malloc[index][0]
        call_in_str = []
        for arg in net.args:
            callname1 = 'h_loop{index}+{pos}*W'.format(index=index, pos=w_pos[arg][0])
            if callname1 not in call_in_str:
                call_in_str.append(callname1)

        for dest in net.dests:
            callname2 = 'h_loop{index}+{pos}*W'.format(index=index, pos=w_pos[dest][0])
            if callname2 not in call_in_str:
                call_in_str.append(callname2)


        return call_in_str

    def simlayer_declare_str(self):
        """Build, per layer, the parameter-declaration strings for the
        ``simlayer<i>`` CUDA kernels.

        For every net of a layer this collects declarations for:

        * wires fed by a predecessor loop node (``loop_<name>`` buffers),
        * wires consumed by a successor loop node (``loop<index>_<name>``
          buffers; the wire is also recorded in ``self.w_conn_loop`` so the
          connection is known at call-emission time),
        * plain ``Input``/``Output`` wires.

        Duplicates are dropped per layer while preserving first-seen order so
        the declaration list stays aligned with :meth:`simlayer_call_str`.

        :return: list (one entry per layer) of lists of declaration strings
        """
        layers_declare_in_str = []
        nodetype = self.g.ndata['nodetype'].tolist()
        pre_id = self.g.ndata['id'].tolist()
        for layer in self.re_layer:
            declare_in_str = []
            for node in layer:
                net = self.indexTonet[node[0]]
                in_nodes = self.g.in_edges(pre_id.index(node[0]))[0].tolist()  # predecessor nodes
                for in_node in in_nodes:
                    if nodetype[in_node] == 1:  # predecessor node is a loop
                        index = pre_id[in_node]
                        in_net = self.indexTonet[index]
                        # (a dead `loop_in_out` lookup of loops_in_out_malloc
                        # was removed here — the value was never used)
                        for arg in net.args:
                            if wv_is_dests(in_net, arg):
                                declarename1 = 'uint64_t loop_' + self.gpu_varname[arg][0] + '[' + str(
                                    ESSENT._limbs(self, arg)) + '*W]'
                                if declarename1 not in declare_in_str:
                                    declare_in_str.append(declarename1)

                out_nodes = self.g.out_edges(pre_id.index(node[0]))[1].tolist()  # successor nodes
                for out_node in out_nodes:
                    if nodetype[out_node] == 1:  # successor node is a loop
                        index = pre_id[out_node]
                        out_net = self.indexTonet[index]
                        for dest in net.dests:
                            if wv_is_args(out_net, dest):
                                declarename1 = 'uint64_t loop' + str(index) + '_' + self.gpu_varname[dest][0] + '[' + str(
                                    ESSENT._limbs(self, dest)) + '*W]'
                                if declarename1 not in declare_in_str:
                                    # remember which loops this wire feeds
                                    self.w_conn_loop[dest.name].add(index)
                                    declare_in_str.append(declarename1)

                for arg in net.args:
                    if isinstance(arg, Input):
                        declarename1 = 'uint64_t ' + self.gpu_varname[arg][0] + '[' + str(
                            CompiledSimulation._limbs(self, arg)) + '*W]'
                        if declarename1 not in declare_in_str:
                            declare_in_str.append(declarename1)

                for dest in net.dests:
                    if isinstance(dest, Output):
                        declarename2 = 'uint64_t ' + self.gpu_varname[dest][0] + '[' + str(
                            CompiledSimulation._limbs(self, dest)) + '*W]'
                        if declarename2 not in declare_in_str:
                            declare_in_str.append(declarename2)

            layers_declare_in_str.append(declare_in_str)

        return layers_declare_in_str

    def simlayer_call_str(self):
        """Build, per layer, the kernel-launch argument strings for the
        ``simlayer<i>`` CUDA kernels.

        Mirrors :meth:`simlayer_declare_str`: for every net of a layer it
        emits device-pointer expressions for wires exchanged with
        predecessor / successor loop nodes (offset into the loop's
        ``d_loop<index>`` buffer by the wire's limb position), and for plain
        ``Input``/``Output`` wires (offset by the warp index ``i`` of the
        generated outer loop). De-duplication preserves first-seen order so
        the arguments stay aligned with the kernel's parameter list.

        :return: list (one entry per layer) of lists of call-argument strings
        """
        layers_call_in_str = []
        nodetype = self.g.ndata['nodetype'].tolist()
        pre_id = self.g.ndata['id'].tolist()
        for layer in self.re_layer:
            call_in_str = []
            for node in layer:
                net = self.indexTonet[node[0]]
                in_nodes = self.g.in_edges(pre_id.index(node[0]))[0].tolist()  # predecessor nodes
                for in_node in in_nodes:
                    if nodetype[in_node] == 1:  # predecessor node is a loop
                        index = pre_id[in_node]
                        in_net = self.indexTonet[index]
                        loop_in_out = self.loops_in_out_malloc[index][0]  # {w: [pos, limb]}
                        for arg in net.args:
                            if wv_is_dests(in_net,arg):
                                callname1 = 'd_loop{index}+{pos}*W'.format(index=index,pos = loop_in_out[arg][0])
                                if callname1 not in call_in_str:
                                    call_in_str.append(callname1)

                out_nodes = self.g.out_edges(pre_id.index(node[0]))[1].tolist()  # successor nodes
                for out_node in out_nodes:
                    if nodetype[out_node] == 1:  # successor node is a loop
                        index = pre_id[out_node]
                        out_net = self.indexTonet[index]
                        loop_in_out = self.loops_in_out_malloc[index][0]  # {w: [pos, limb]}
                        for dest in net.dests:
                            if wv_is_args(out_net,dest):
                                callname1 = 'd_loop{index}+{pos}*W'.format(index=index,pos = loop_in_out[dest][0])
                                if callname1 not in call_in_str:
                                    call_in_str.append(callname1)

                for arg in net.args:
                    if isinstance(arg, Input):
                        limbs = ESSENT._limbs(self, arg)
                        callname1 = 'd_' + self.gpu_varname[arg][0] + '+i*W*' + str(limbs)
                        if callname1 not in call_in_str:
                            call_in_str.append(callname1)

                for dest in net.dests:
                    if isinstance(dest, Output):
                        limbs = ESSENT._limbs(self, dest)
                        callname1 = 'd_' + self.gpu_varname[dest][0] + '+i*W*' + str(limbs)
                        # print(declarename2)
                        if callname1 not in call_in_str:
                            call_in_str.append(callname1)
            # print(call_in_str)
            layers_call_in_str.append(call_in_str)

        return layers_call_in_str



    def create_code(self, filename):

        # self._dir = tempfile.mkdtemp()
        self._dir = os.getcwd().replace('\\', '/') + '/'
        # print(self._dir)

        with open(self._dir + filename + '.cuh', 'w') as f:
            self._create_cuh(lambda s: f.write(s + '\n'))
        with open(self._dir + filename + '_sim.cuh', 'w') as fmain:
            self._create_sim(lambda s: fmain.write(s + '\n'), filename)
        with open(self._dir + filename + '_main.cu', 'w') as fmain:
            self._create_main(lambda s: fmain.write(s + '\n'), filename)

        # subprocess.check_call(['nvcc', '-o', self._dir + filename + '.so','-shared','-Xcompiler' '-fPIC',self._dir + filename + '_sim.cu'],
        #                       shell=(platform.system() == 'Windows'))
        # self._dll = ctypes.CDLL(self._dir+filename+'.so')
        # self._crun = self._dll.sim


    def _create_sim(self, write, filename):
        """Emit the host-side simulation source (``<filename>_sim.cuh``).

        Each call to *write* produces one line of generated CUDA/C source.
        The generated file defines:

        * ``pre()``           -- select device 0 and initialize memory,
        * ``cpuTogpu()``      -- allocate pinned/device buffers and copy the
                                 input traces host-to-device asynchronously,
        * ``gpusim()``        -- per-warp launches of the ``simlayer<i>``
                                 kernels on stream 0, interleaved (via CUDA
                                 events) with the CPU-side ``loop<index>()``
                                 functions and their transfers on stream 1,
        * ``gpuTocpu()``      -- copy output traces device-to-host,
        * ``print_outputs()`` -- dump outputs to ``<filename>_outputs.csv``,
        * ``reset()``         -- free buffers and reset the device.

        Side effects on *self*: fills ``_inputpos``/``_inputbw``/``_outputpos``
        and ``_ibufsz``/``_obufsz``, and refreshes ``loops_in_out_malloc``.

        :param write: callable that writes one generated source line
        :param filename: base name used for the generated ``#include`` lines
        """
        # BUG FIX: both include lines had lost their {filename} placeholder,
        # so .format() was a no-op and the generated file included a literal,
        # nonexistent header name.
        write('#include "{filename}.cuh"'.format(filename=filename))
        write('#include "{filename}_inputs.cuh"'.format(filename=filename))

        inputs = list(self.block.wirevector_subset(Input))
        outputs = list(self.block.wirevector_subset(Output))

        write('void pre(){')
        write('// set up device')
        write('int dev = 0;')
        write('cudaSetDevice(dev);')
        write('init_mem();')
        write('}')

        write('void cpuTogpu(){')
        write('size_t len = stepcount * sizeof(uint64_t);')
        write('size_t nBytes = W * sizeof(uint64_t);')
        write('uint64_t warpcount = int(ceil(stepcount/W));')
        write('// mallocbdevice global memory')
        self._inputpos = {}  # for each input wire, start and number of elements in input array
        self._inputbw = {}  # bitwidth of each input wire
        ipos = 0
        for w in inputs:
            self._inputpos[w.name] = ipos, CompiledSimulation._limbs(self, w)
            self._inputbw[w.name] = w.bitwidth

            write('cudaMalloc((uint64_t **) & d_{vn}, len*{limb});'.format(vn=self.gpu_varname[w][0],
                                                                           limb=CompiledSimulation._limbs(self, w)))
            # one slot per limb in the flattened input array
            ipos += CompiledSimulation._limbs(self, w)

        self._ibufsz = ipos  # total length of input array

        self._outputpos = {}  # for each output wire, start and number of elements in output array
        opos = 0
        for w in outputs:
            self._outputpos[w.name] = opos, CompiledSimulation._limbs(self, w)

            write('cudaMalloc((uint64_t **) & d_{vn}, len*{limb});'.format(vn=self.gpu_varname[w][0],
                                                                           limb=CompiledSimulation._limbs(self, w)))

            opos += 1
        self._obufsz = opos  # total length of output array

        for w in inputs:
            write('cudaMallocHost((uint64_t **) & h_{vn}, len*{limb});'.format(vn=self.gpu_varname[w][0],
                                                                               limb=CompiledSimulation._limbs(self, w)))
            write('for(int i=0;i<warpcount;i++){')
            for n in range(CompiledSimulation._limbs(self, w)):
                write('memcpy(h_{vn}+i*W*1+W*{n}, {vn}[{n}]+i*W, nBytes);'.format(n=n, vn=self.gpu_varname[w][0]))
            write('}')
        for w in outputs:
            write('cudaMallocHost((uint64_t **) & h_{vn}, len*{limb});'.format(vn=self.gpu_varname[w][0],
                                                                               limb=CompiledSimulation._limbs(self, w)))

        self.loops_in_out_malloc = self.loops_in_out()
        for loop in self.loops_in_out_malloc.items():
            write('cudaMallocHost((uint64_t **) & h_loop{index}, nBytes*{pos});'.format(index=loop[0], pos=loop[1][1]))
            write('cudaMallocHost((uint64_t **) & d_loop{index}, nBytes*{pos});'.format(index=loop[0], pos=loop[1][1]))

        # one stream per input wire for the initial H2D copies
        write('cudaStream_t stream[{streamnum}]={{nullptr}};'.format(streamnum=len(inputs)))
        write('for(int i=0;i<{streamnum};i++)'.format(streamnum=len(inputs)))
        write('{')
        write('cudaStreamCreate(&stream[i]); }')

        for streamid, w in enumerate(inputs):
            write(
                'cudaMemcpyAsync(d_{vn}, h_{vn}, len*{limbs}, cudaMemcpyHostToDevice,stream[{streamid}]);'.format(
                    limbs=ESSENT._limbs(self, w), vn=self.gpu_varname[w][0], streamid=streamid))
        write('for(int i=0;i<{streamnum};i++)'.format(streamnum=len(inputs)))
        write('{')
        write('cudaStreamSynchronize(stream[i]);')
        write('cudaStreamDestroy(stream[i]);')
        write('}')
        write('}')

        # NOTE: a `gpusim_declare_in` list (and the `s_s`/`e_s` lists above)
        # used to be built here but was never read; dropped as dead code.
        output_declare_in = []
        output_call_in = []
        for w in outputs:
            output_declare_in.append('uint64_t* h_' + self.gpu_varname[w][0])
            output_call_in.append('h_' + self.gpu_varname[w][0])

        write('void gpusim() ')
        write('{')
        write('size_t nBytes = W * sizeof(uint64_t);')
        write('uint64_t warpcount = int(ceil(stepcount/W));')
        # BUG FIX: this literal is not passed through str.format, so the
        # doubled braces were emitted verbatim ("{{nullptr}}"), producing
        # invalid CUDA source.
        write('cudaStream_t stream[2]={nullptr};')
        write('for(int i=0;i<2;i++)')
        write('{')
        write('cudaStreamCreate(&stream[i]); }')
        write('cudaEvent_t event[2];')
        write('for(int i=0;i<2;i++)')
        write('{')
        write('cudaEventCreate(&event[i]); }')
        write('for(int i=0;i<warpcount;i++){')
        layers_call_in_str = self.simlayer_call_str()
        for i, layerlist in enumerate(self.re_layer):
            if len(layerlist) > 0:
                write(
                    'simlayer{i}<<<{count}*{blocknum},tnum,0,stream[0]>>>({in_str});'.format(i=i, count=len(layerlist),
                                                                                             blocknum=
                                                                                             self.blocknum_list[i],
                                                                                             in_str=','.join(
                                                                                                 layers_call_in_str[i]),
                                                                                             ))

            # Loops scheduled at this layer: run the CPU-side loop function,
            # push its results to the device on stream 1, and make stream 0
            # wait on that transfer before the next layer's kernels launch.
            if i in self.loop_locate.keys():
                for loop in self.loop_locate[i]:
                    loop_call_in_str = self.loop_call_str(loop)
                    write('cudaStreamSynchronize(stream[1]);')
                    write('loop{index}({in_str});'.format(index=loop, in_str=','.join(loop_call_in_str)))
                    write(
                        'cudaMemcpyAsync(d_loop{index},h_loop{index}, nBytes*{pos}, cudaMemcpyHostToDevice,stream[1]);'.format(
                            pos=self.loops_in_out_malloc[loop][1], index=loop))
                    write('cudaEventRecord(event[0],stream[1]);')
                    write('cudaStreamWaitEvent(stream[0],event[0]);')

            # Loops scheduled at the NEXT layer: start pulling their inputs
            # off the device as soon as this layer's kernels have finished.
            if i + 1 in self.loop_locate.keys():
                for loop in self.loop_locate[i + 1]:
                    write('cudaEventRecord(event[1],stream[0]);')
                    write('cudaStreamWaitEvent(stream[1],event[1]);')
                    write(
                        'cudaMemcpyAsync(h_loop{index},d_loop{index}, nBytes*{pos}, cudaMemcpyDeviceToHost,stream[1]);'.format(
                            pos=self.loops_in_out_malloc[loop][1], index=loop))

        write('cudaError_t err=cudaGetLastError();')
        write('if(err != cudaSuccess)  printf("cuda error: %s",cudaGetErrorString(err));')
        write('}')

        write('for(int i=0;i<2;i++)')
        write('{')
        write('cudaStreamSynchronize(stream[i]);')
        write('}')
        write('for(int i=0;i<2;i++)')
        write('{cudaStreamDestroy(stream[i]);')
        write('cudaEventDestroy(event[i]);}')
        write('}')

        write('void gpuTocpu(){')
        if len(outputs):
            write('size_t nBytes = W * sizeof(uint64_t);')
            write('uint64_t warpcount = int(ceil(stepcount/W));')
            write('cudaStream_t stream[{streamnum}]={{nullptr}};'.format(streamnum=len(outputs)))
            write('for(int i=0;i<{streamnum};i++)'.format(streamnum=len(outputs)))
            write('{')
            write('cudaStreamCreate(&stream[i]); }')
            write('for(int i=0;i<warpcount;i++){')
            for streamid, w in enumerate(outputs):
                for limb in range(CompiledSimulation._limbs(self, w)):
                    write(
                        'cudaMemcpyAsync(h_{vn}+{limb}*stepcount+i*W,d_{vn}+{limb}*W, nBytes, cudaMemcpyDeviceToHost,stream[{streamid}]);'.format(
                            vn=self.gpu_varname[w][0], limb=limb, streamid=streamid))
            write('}')
            write('for(int i=0;i<{streamnum};i++)'.format(streamnum=len(outputs)))
            write('{')
            write('cudaStreamSynchronize(stream[i]);')
            write('cudaStreamDestroy(stream[i]);')
            write('}')

        write('}')

        write('void print_outputs({in_str})'.format(in_str=','.join(output_declare_in)))
        write('{FILE *fp;')
        # BUG FIX: the csv file name had also lost its {filename} placeholder.
        write('fp = fopen("{filename}_outputs.csv", "w");'.format(filename=self.filename))
        for name in output_call_in:
            write('fprintf(fp, "{name},");'.format(name=name))
            write('for(int i=0;i<stepcount;i++){')
            # BUG FIX: the values are uint64_t, so "%u" truncated them to
            # 32 bits (and is undefined behavior); use the 64-bit specifier.
            write('fprintf(fp, "%llu,",*({output_name}+i));'.format(output_name=name))
            write('}')
            write('fprintf(fp, "' + '\\' + 'n' + '");')
        write('fclose(fp);')
        write('}')

        write('void reset(){')
        # free the pinned host buffer and the device buffer of every
        # input and output wire
        s_e_wire = inputs.copy()
        s_e_wire.extend(outputs)
        for w in s_e_wire:
            write('cudaFreeHost(h_{vn});'.format(vn=self.gpu_varname[w][0]))
            write('cudaFree(d_{vn});'.format(vn=self.gpu_varname[w][0]))

        write('cudaDeviceReset();')
        write('}')

    def _create_cuh(self, write):

        write('#include <stdint.h>')
        write('#include <stdlib.h>')
        write('#include <string.h>')
        write('#include <math.h>')
        write('#include <cuda_runtime.h>')
        write('#include <stdio.h>')
        write('#include <sys/time.h>')
        write('#define stepcount {stepcount}'.format(stepcount=self.stepcount))
        write('#define W {W}'.format(W=self.W))
        write('#define tnum {tnum}\n'.format(tnum=self.tnum))

        # windows dllexport needed to make symbols visible
        if platform.system() == 'Windows':
            write('#define EXPORT __declspec(dllexport)')
        else:
            write('#define EXPORT')

        gpu_mulstr = '''__device__ void gpu_mul128(uint64_t x, uint64_t y,uint64_t* lo, uint64_t* hi) {
             volatile register uint32_t mul[4]; // NEVER REMOVE VOLATILE HERE!!!
             // 128-bit = 64-bit * 64-bit
             asm("mul.lo.u32 %0, %4, %6;"
             "mul.hi.u32 %1, %4, %6;"
             "mul.lo.u32 %2, %5, %7;"
             "mul.hi.u32 %3, %5, %7;"
             "mad.lo.cc.u32 %1, %4, %7, %1;"
             "madc.hi.cc.u32 %2, %4, %7, %2;"
             "addc.u32 %3, %3, 0;"
             "mad.lo.cc.u32 %1, %5, %6, %1;"
             "madc.hi.cc.u32 %2, %5, %6, %2;"
             "addc.u32 %3, %3, 0;"
             : "+r"(mul[0]), "+r"(mul[1]), "+r"(mul[2]), "+r"(mul[3])
             : "r"(((uint32_t *)&x)[0]), "r"(((uint32_t *)&x)[1]),
             "r"(((uint32_t *)&y)[0]), "r"(((uint32_t *)&y)[1]));

             *lo=((uint64_t)mul[1]<<32)|mul[0];
             *hi=((uint64_t)mul[3]<<32)|mul[2];

             }
             '''
        write(gpu_mulstr)

        self.loops_in_out_malloc = self.loops_in_out()
        for loop in self.loops_in_out_malloc.items():
            write('uint64_t *h_loop{index};'.format(index=loop[0]))
            write('uint64_t *d_loop{index};'.format(index=loop[0]))
        inputs = list(self.block.wirevector_subset(Input))
        outputs = list(self.block.wirevector_subset(Output))
        for w in inputs:
            # write('extern uint64_t {vn}[{limb}][stepcount];'.format(vn=self.gpu_varname[w][0],
            #                                                          limb=CompiledSimulation._limbs(self, w)))
            write('uint64_t *d_{vn},*h_{vn};'.format(vn=self.gpu_varname[w][0]))

        for w in outputs:
            write('uint64_t *d_{vn},*h_{vn};'.format(vn=self.gpu_varname[w][0]))


        '''
        # multiplication macro
        #  for efficient 64x64 -> 128 bit multiplication without uint128_tw
        #  as -O0 optimization does not handle uint128_t well

        machine_alias = {'amd64': 'x86_64', 'aarch64': 'arm64', 'aarch64_be': 'arm64'}
        machine = platform.machine().lower()
        machine = machine_alias.get(machine, machine)
        mulinstr = {
            'x86_64': '"mulq %q3":"=a"(pl),"=d"(ph):"%0"(t0),"r"(t1):"cc"',
            'arm64': '"mul %0, %2, %3\n\tumulh %1, %2, %3":'
                     '"=&r"(*pl),"=r"(*ph):"r"(t0),"r"(t1):"cc"',
            'mips64': '"dmultu %2, %3\n\tmflo %0\n\tmfhi %1":'
                      '"=r"(*pl),"=r"(*ph):"r"(t0),"r"(t1)',
        }
        if machine in mulinstr:
            write('#define mul128(t0, t1, pl, ph) __asm__({})'.format(mulinstr[machine]))
        '''
        reg_declare = set()
        #print(self.loops_reg)
        print(self.loop_locate)
        for la in self.loop_locate.values():
            for lo in la:
                for reg in self.loops_reg[lo]:
                    if reg not in reg_declare:
                        val_type = self._val_type(reg)
                        ESSENT._declare_wire(self, write, reg, val_type)

        for item in self.mid_declare.items():  # 运算时不断重新占用地址
            for i in range(item[1]):
                write('__device__ ' + item[0] + ' _0_midout_' + item[0] + str(i) + '[W]={0};')
                write('__device__ ' + item[0] + ' _1_midout_' + item[0] + str(i) + '[W]={0};')

        # for wname in self.loops_input:
        #     w = self.block.wirevector_by_name[wname]
        #     val_type = self._val_type(w)
        #     write('{type} {name}[{limbs}][W]={{0}};'.format(type=val_type,
        #                                                                limbs=CompiledSimulation._limbs(self, w),
        #                                                                name=self.gpu_varname[w][0]))
        # for wname in self.loops_output:
        #     w = self.block.wirevector_by_name[wname]
        #     val_type = self._val_type(w)
        #     write('{type} {name}[{limbs}][W]={{0}};'.format(type=val_type,
        #                                                     limbs=CompiledSimulation._limbs(self, w),
        #                                                     name=self.gpu_varname[w][0]))

        for wname in self.layer_long_keep_wire:
            w = self.block.wirevector_by_name[wname]
            val_type = self._val_type(w)
            write('__device__ {type} {name}[{limbs}][W]={{0}};'.format(type=val_type,
                                                                       limbs=CompiledSimulation._limbs(self, w),
                                                                       name=self.gpu_varname[w][0]))

        self.init_reg={}
        for w in self.block.wirevector_subset(Register):

            rval = self._regmap.get(w, w.reset_value)
            if rval is None:
                rval = self.default_value
            else:
                self.init_reg[w] = rval

            val_type = self._val_type(w)
            if w.name in self.loops_input or w in self.loops_output:
                write('__device__ {type} {name}[{limbs}][W];'.format(type=val_type,
                                                                     limbs=CompiledSimulation._limbs(self,
                                                                                                     w),
                                                                     name=self.gpu_varname[w][0]))
                write('__device__ {type} {name}_default[{limbs}][1]={val};'.format(type=val_type,
                                                                                   limbs=CompiledSimulation._limbs(self,
                                                                                                                   w),
                                                                                   name=self.gpu_varname[w][0],
                                                                                   val=self._gpu_makeini(w, rval)))

            elif w.name in self.loops_inter_reg:
                write('__device__ {type} {name}[{limbs}][W];'.format(type=val_type,
                                                                     limbs=CompiledSimulation._limbs(self,
                                                                                                     w),
                                                                     name=self.gpu_varname[w][0]))
                write('__device__ {type} {name}_default[{limbs}][1]={val};'.format(type=val_type,
                                                                                   limbs=CompiledSimulation._limbs(self,
                                                                                                                   w),
                                                                                   name=self.gpu_varname[w][0],
                                                                                   val=self._gpu_makeini(w, rval)))

            else:
                write('__device__ {type} {name}[{limbs}][W];'.format(type=val_type,
                                                                     limbs=CompiledSimulation._limbs(self,
                                                                                                     w),
                                                                     name=self.gpu_varname[w][0]))
                write('__device__ {type} {name}_default[{limbs}][1]={val};'.format(type=val_type,
                                                                                   limbs=CompiledSimulation._limbs(self,
                                                                                                                   w),
                                                                                   name=self.gpu_varname[w][0],
                                                                                   val=self._gpu_makeini(w, rval)))


        # declare memories
        mems = {net.op_param[1] for net in self.block.logic_subset('m@')}
        if mems:
            for key in self._memmap:
                if key not in mems:
                    raise PyrtlError('unrecognized MemBlock in memory_value_map')
                if isinstance(key, RomBlock):
                    raise PyrtlError('RomBlock in memory_value_map')
            self._gpu_declare_mem_helpers(write)
            roms = {mem for mem in mems if isinstance(mem, RomBlock)}
            self._gpu_declare_roms(write, roms)
            mems = {mem for mem in mems if isinstance(mem, MemBlock) and not isinstance(mem, RomBlock)}
            self._gpu_declare_mems(write, mems)

        self.loops_in_out_malloc = self.loops_in_out()
        #print(self.w_conn_loop.keys())
        layers_declare_in_str = self.simlayer_declare_str()
        #print(self.w_conn_loop)
        for layer,layerlist in enumerate(self.re_layer):
            if len(layerlist)>0:
                write('__global__ void simlayer{layer}({in_str})'.format(layer=layer,
                                                                     in_str=','.join(layers_declare_in_str[layer])))

                write('{')

                self.writesimbody(write, layer)

                write('}')


        for item in self.loop_locate.items():
            for loop in item[1]:
                loop_block = self.loops_block[loop]
                net = self.indexTonet[loop]
                loop_declare_in_str = self.loop_declare_str(net)
                write('void loop{index}({in_str})'.format(index=loop, in_str=','.join(loop_declare_in_str)))
                write('{')
                # print(net)
                # write('marco')
                # print(layer)

                # self._declare_macro_wv_com(write, net)
                # self._connect_logicnet_with_macro_com(write, net, layer, i)
                # self.write_macro_simbody_com(write, net)
                self.ess_macro_pretreat(loop_block)
                self._declare_macro_wv_ess(write, loop_block,loop)
                # 声明定义Flag
                if self.optimization in [2, 3]:
                    write('  ' + 'int Flag_same_inputs[{n}]='.format(n=len(self.stmtsOrdered)) + '{' + '0,' * len(
                        self.stmtsOrdered) + '};')
                    write(' ')
                write('for(int j=0;j<W;j++){')
                self._connect_logicnet_with_macro_in_ess(write, loop_block, net, item[0],0)
                self.write_macro_simbody_ess(write, loop_block)
                self._connect_logicnet_with_macro_out_ess(write, loop_block,loop, net, item[0])
                write('}')
                write('}')


        write('void init_mem(){')
        if mems:
            write('initialize_mems<<<1,{mem_num}>>>();'.format(mem_num=len(mems)))
        write('}')



    def _create_main(self, write, filename):
        """Emit the host-side ``main()`` driver for the generated CUDA simulator.

        The generated program initializes state (``pre``), copies inputs to the
        device (``cpuTogpu``), runs ``gpusim()`` under a ``gettimeofday`` wall-clock
        timer, copies results back (``gpuTocpu``), prints the traced outputs and
        resets.

        :param write: callable that appends one line of generated C/CUDA source.
        :param filename: basename of the generated simulator, used to form the
            ``<filename>_sim.cuh`` header include.
        """
        # FIX: the original wrote a hard-coded placeholder header name and never
        # used the `filename` parameter; interpolate it so the emitted main()
        # includes the header that was actually generated for this design.
        write(f'#include "{filename}_sim.cuh"')
        write('int main(){')
        write('pre();')
        write('cpuTogpu();')
        # Wall-clock timing brackets the simulation call.
        write('struct timeval start,end;')
        write('gettimeofday(&start,NULL);')
        outputs = list(self.block.wirevector_subset(Output))
        # Host-side buffer identifiers for every traced output wire;
        # gpu_varname[w][0] is the canonical C identifier for wire `w`.
        # (The original also built a list of device-side names for the inputs
        # and outputs, but gpusim() is emitted with no arguments, so that list
        # was dead code and has been removed.)
        output_call_in = ['h_' + self.gpu_varname[w][0] for w in outputs]
        write('gpusim();')
        timeend = '''
                        gettimeofday(&end,NULL);
                        double timeuse = ( end.tv_sec - start.tv_sec ) + (end.tv_usec - start.tv_usec)/1000000.0;  
                        printf("time=%f",timeuse);
                        '''
        write(timeend)
        write('gpuTocpu();')
        write('print_outputs({in_str});'.format(in_str=','.join(output_call_in)))
        write('reset();')
        write('return 0;')
        write('}')

