#!/usr/bin/env python
# -*- coding: utf-8 -*-

import glob
import re
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import copy
import json
import os
import argparse
import markdown
from difflib import get_close_matches  # for giving close keywords
import xlsxwriter
import hashlib
import pathlib
import time
import ast
import itertools
import shutil


def get_content_sha256(content: str) -> str:
    """Return the hex SHA256 digest of *content*, encoded as UTF-8."""
    return hashlib.sha256(content.encode('utf-8')).hexdigest()


# Total bit length of one encoded instruction (two 64-bit words).
INSTRUCTION_BIT_LENGTH = 128

# Top level keywords
TOP_KEYS = {'__DefBitFieldType',
            '__DefImmBitFieldType',
            '__DefPropType',
            '__DefGroup',
            '__DefOptype',
            '__DefOpcode',
    }

# member keywords
MEMBER_KEYS = {
#           '__Format',   # deprecated
            '__Encoding',
            '__Property',
            '__OperandInfo',
            '__Exception',
            '__Examples',
            '__Semantics',
            '__Description',
            '__Simulation',
            '__Syntax',
            '__ModifierInfo'
    }

# __OperandInfo attributes whose value is a list
OPERANDINFO_LISTS = {
    'InList',
    'OutList',
    'Order',
    'ModiOrder'
    }

# __OperandInfo attributes whose value is a dict
OPERANDINFO_DICTS = {
            'Bitwidth',
            'AsmFormat',
}

OPERANDINFO_ATTRS = {*OPERANDINFO_LISTS, *OPERANDINFO_DICTS}

## Attribute to store plain text content in __OperandInfo nodes
OPERANDINFO_PLAINTEXT = 'PlainText'

# all keywords
ALL_KEYS = set([*TOP_KEYS, *MEMBER_KEYS])

# Allowed operations for associated fields (fields named "var.op"),
# see ASLEncoding.checkAssociatedFields.
ASSOCIATED_ATTRS = {'neg', 'abs', 'not', 'bitnot', 'hsel', 'bsel', 'hsel2', 'vsel', 'addrmode', 'rstride', 'buf'}

IMAGE_FORMAT= 'svg'

# "optype.operand" names whose operand is treated as a function immediate
# bitfield, see ASLEncoding.enumerateFunc.
FUNC_IMM_BFS = {'ULOP3.lut',
                'LOP3.lut',
                'UPLOP3.lut',
                'PLOP3.lut',
                'LEA.shiftamt',
                'ULEA.shiftamt',
                'HMMA_SP.mdsel',
                'IMMA_SP.mdsel',
                'QMMA_SP.mdsel'}

ASMFORMAT_FUNCS = {'CvtVSel', 'CvtVPSel', 'CvtFImm', 'CvtINegX', 'CvtIShift'}

# Funcs are usually associated with OPTYPE_R+, but sometimes there may be conflicts, (Such as BMOV_R_B/BMOV_B_R)
# thus stype should be added to FuncKey to avoid this.
# TODO: Currently those optypes all go to user codes, no need for special treatment?
OPTYPES_FUNC_WITH_STYPE = {'BMOV', 'ATOM', 'ATOMG', 'CCTL', 'CCTLL'}

# Use operators to avoid possible name conflict with var names
AST_OP_DICT = {'Add' : '+', 'Mult' : '*', 'Div' : '/',
               'Eq' : '==', 'And' : '&&', 'Or' : '||',
               'Sub' : '-', 'Not' : '!', 'NotEq' : '!='}

# Pattern for insignificant spaces, they will be collapsed first, and removed finally
# Spaces between words([0-9A-Za-z_]) will be kept, others will be removed
p_InsignificantSpace = re.compile(r'((?<=[\w\?])\s+(?![\w\?]))|((?<![\w\?])\s+(?=[\w\?]))|((?<![\w\?])\s+(?![\w\?]))')

# C++-style line comment, to end of line
p_comment = re.compile('//.*$')

# keyword lines
p_topkey_line = re.compile('^(?P<KeyName>' + ('|'.join(TOP_KEYS)) + r')\b')
p_memberkey_line = re.compile('^(?P<KeyName>' + ('|'.join(MEMBER_KEYS)) + r')\b')
p_key_line = re.compile('^(?P<KeyName>' + ('|'.join(ALL_KEYS)) + r')\b')
# NOTE(review): the alternation below is NOT grouped after the backslash, so the
# pattern reads r'\KEY1|KEY2|...' and group 1 spans the whole match; substituting
# group 1 back (as doKeywordsEscape does) leaves the text unchanged. Probably
# intended r'\\(KEY1|KEY2|...)' — confirm.
p_key_escape = re.compile(r'(?P<EscapeKeyName>\\' + ('|'.join(ALL_KEYS)) + r')\b')
# Wiki-style cross reference: [[Target]]
p_reflink = re.compile(r'\[\[(?P<Target>\w+)\]\]')

# Group/Optype/Opcode definition header, with optional inheritance list
p_group = re.compile(r'^__Def(Group|Optype|Opcode) (?P<Name>\w+)(:\[(?P<Inherit>[\w,]+)\])?$')

#
p_def_bitfieldtype = re.compile(r'__Def\w+ (?P<Name>\w+)<(?P<BitWidth>[0-9:]+)>;?$')

# Encoding fields
p_field = re.compile(r'field\s*<(?P<FRange>[^>]*)>\s?(?P<FType>\S+)\s+(?P<FVar>[^\s=;]+)(?P<FVal>.*)?;')

#
p_enum = re.compile(r'(?P<Name>[\w\.]+)(=(?P<Value>0x[0-9a-fA-F]+))?;')

# Any line starting with a double-underscore keyword (for unknown-keyword checks)
p_pkey_line = re.compile(r'^(?P<KeyName>__\w+)')

p_operandinfo_attr = re.compile(r'^(?P<Attr>' + (r'|'.join(OPERANDINFO_ATTRS)) + r')\<(?P<Obj>[\w,\[\.\]]*)\>\s*' + r'(?P<Value>=.*)?;$')

# AsmFormat function call: Func(Var,Modi)
p_asmformat = re.compile(r'^(?P<Func>\w+)\((?P<Var>[\w\.]+),(?P<Modi>[\w\.]+)\)$')

p_exception_line = re.compile(r'^(?P<ExcepType>EncodingError|RuntimeException)\<(?P<ExcepName>\w+)(,(?P<Reason>\"[^"]+\"))?\>(=(?P<CondExpr>.*))?;')

def getUnusedBitsList(set_bits):
    """Return half-open runs [(start, end), ...] of zero bits in a 128-bit mask.

    Runs never cross the 64-bit word boundary: a zero run spanning bit 64 is
    reported as two runs, one per 64-bit segment.
    """
    runs = []
    for base in (0, 64):
        start = None  # start of the current zero run, or None if inside set bits
        for bit in range(base, base + 64):
            if (set_bits >> bit) & 1:
                if start is not None:
                    runs.append((start, bit))
                    start = None
            elif start is None:
                start = bit
        if start is not None:  # zero run extends to the end of this segment
            runs.append((start, base + 64))
    return runs

def checkDirExistence(dname):
    """Ensure *dname* exists as a directory, creating it (with parents) if missing.

    Raises:
        OSError: if the path exists but is not a directory.
    """
    if os.path.isdir(dname):
        return
    if os.path.exists(dname):
        raise OSError(f'ERROR! Target dir "{dname}" already exists but is not a directory!')
    print(f'Making dir {dname}...')
    os.makedirs(dname)

def doKeywordsEscape(s):
    r"""Remove the escaping backslash before ASL keywords, e.g. '\__Encoding' -> '__Encoding'.

    Fix: the module-level p_key_escape pattern did not group its alternation
    after the backslash (pattern read r'\KEY1|KEY2|...' with group 1 spanning
    the whole match), so substituting group 1 back was a no-op. A correctly
    grouped pattern is used here instead.
    """
    return re.subn(r'\\(' + ('|'.join(ALL_KEYS)) + r')\b', r'\1', s)[0]

def getFileSHA256(fname, sha_obj=None):
    """Return the hex SHA256 of file *fname*, reading it in 8 KiB chunks.

    If *sha_obj* is given, it is updated in place (allowing digests spanning
    multiple files); otherwise a fresh hasher is used.
    """
    hasher = hashlib.sha256() if sha_obj is None else sha_obj
    with open(fname, 'rb') as fin:
        while chunk := fin.read(8192):
            hasher.update(chunk)
        return hasher.hexdigest()

def evalExpr(expr_op_stack:list, vdict:dict):
    r''' Evaluate a postfix (RPN) expression op stack using values from *vdict*.

    Tokens are processed left to right on a value stack:
      - binary operators: '+', '-', '*', '/', '==', '!=', '&&', '||'
        (note '/' is integer floor division)
      - unary operator: '!'
      - any other string: variable name, looked up in vdict
      - anything else: literal value, pushed as-is

    Example:
        expr = 32 + (dtype==5)*32 + (dtype==6)*96
        expr_op_stack = [ 32, "dtype", 5, "==", 32, "*", "+", "dtype", 6, "==", 96, "*", "+" ]
        vdict = {'dtype':6}   # evaluates to 128
    '''
    binary_ops = {
        '==': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '*' : lambda a, b: a * b,
        '+' : lambda a, b: a + b,
        '-' : lambda a, b: a - b,
        '/' : lambda a, b: a // b,  # integer division, matching the original
        '&&': lambda a, b: a and b,
        '||': lambda a, b: a or b,
    }

    stack = []
    for tok in expr_op_stack:
        if isinstance(tok, str) and tok in binary_ops:
            rhs = stack.pop()
            lhs = stack.pop()
            stack.append(binary_ops[tok](lhs, rhs))
        elif tok == '!':
            stack.append(not stack.pop())
        elif isinstance(tok, str):
            stack.append(vdict[tok])   # variable lookup
        else:
            stack.append(tok)          # literal

    return stack[0]

## class to regularize markdown contents
## class to regularize markdown contents
class MarkdownTextRegularizer:
    """Static helpers to normalize Markdown text before rendering.

    The main entry point is insertBlankLines(), which ensures tables and
    lists are preceded by a blank line so Markdown parsers recognize them.
    """

    @staticmethod
    def isTableSeparator(line):
        """
        Check if a line is a table separator (header separator).
        Args:
            line (str): A line from the Markdown file.
        Returns:
            bool: True if the line is a table separator, False otherwise.
        """
        # Table separator contains '|' and '-' characters
        return bool('|' in line and '-' in line and re.match(r'^\s*\|.*-\s*\|.*$', line))

    @staticmethod
    def isTableDataLine(line):
        """
        Check if a line is a table data line: it contains '|' (typically also
        starting and ending with '|') and is NOT a separator line.
        Args:
            line (str): A line from the Markdown file.
        Returns:
            bool: True if the line is a table data line, False otherwise.
        """
        # Fix: the original expression had an operator-precedence bug —
        # '(starts and ends with |) or (contains | and not separator)' — which
        # made separator lines count as data lines. The separator exclusion
        # must apply to both alternatives.
        stripped = line.strip()
        has_pipes = (stripped.startswith('|') and stripped.endswith('|')) or '|' in line
        return has_pipes and not MarkdownTextRegularizer.isTableSeparator(line)

    @staticmethod
    def isBlankLine(line):
        """
        Check if a line is blank or contains only whitespace.
        Args:
            line (str): A line from the Markdown file.
        Returns:
            bool: True if the line is empty, False otherwise.
        """
        return not line.strip()

    @staticmethod
    def isTableLine(line):
        """
        Check if a line is any kind of table line (separator or data).
        Args:
            line (str): A line from the Markdown file.
        Returns:
            bool: True if the line is a table line, False otherwise.
        """
        return MarkdownTextRegularizer.isTableSeparator(line) or \
               MarkdownTextRegularizer.isTableDataLine(line)

    @staticmethod
    def isListLine(line):
        """
        Check if a line is a list item line.
        A list item line starts with '-' followed by at least one space or tab.
        Args:
            line (str): A line from the Markdown file.
        Returns:
            bool: True if the line is a list item line, False otherwise.
        """
        stripped = line.lstrip()
        return stripped.startswith(('- ', '-\t'))

    @staticmethod
    def insertBlankLines(markdown_content):
        """
        Insert a blank line before the first line of each table/list block
        when the preceding line is neither blank nor part of the same block.
        Args:
            markdown_content (str): The content of the Markdown file.
        Returns:
            str: The modified Markdown content with blank lines inserted.
        """
        lines = markdown_content.splitlines()
        out = []
        for i, line in enumerate(lines):
            prev = lines[i - 1] if i > 0 else None
            if prev is not None and not MarkdownTextRegularizer.isBlankLine(prev):
                # blank line before a table that doesn't follow another table line
                if MarkdownTextRegularizer.isTableLine(line) and \
                   not MarkdownTextRegularizer.isTableLine(prev):
                    out.append("")
                # blank line before a list that doesn't follow another list line
                if MarkdownTextRegularizer.isListLine(line) and \
                   not MarkdownTextRegularizer.isListLine(prev):
                    out.append("")
            out.append(line)
        return "\n".join(out)


class ASLFileContentProvider:
    """Load an ASL source file and expose both original and processed lines.

    mLines holds the raw file lines; mPLines holds the processed counterparts
    with comments stripped, whitespace runs collapsed, and insignificant
    spaces removed. Lines starting with an unknown '__' keyword raise
    ASLParseError with close-match suggestions.
    """

    def __init__(self, fname):
        self.mFilename = fname
        with open(fname, 'r', encoding='utf8') as fin:
            self.mLines = fin.readlines() # original lines

        self.mPLines = []  # processed lines
        for iline, line in enumerate(self.mLines):
            sline = ASLParser.stripComment(line)
            sline = re.subn(r'\s+', ' ', sline)[0] # convert all whitespaces ( \t\r\n) to ONE simple space
            # Fix: removed dead assignment `pline = sline` (it was immediately
            # overwritten by the substitution below).
            pline = p_InsignificantSpace.subn('', sline)[0]   # remove useless spaces

            res = p_pkey_line.match(pline)
            if res is not None and res.group('KeyName') not in ALL_KEYS:
                ckeys = ','.join(get_close_matches(res.group('KeyName'), ALL_KEYS, n=5))
                raise ASLParseError(f'Unknown keyword in {self.mFilename}:{iline+1} : {line} You probably mean : {ckeys} ?')
            self.mPLines.append(pline)

        # Short aliases for frequent access.
        self.L = self.mLines
        self.P = self.mPLines

    def enumLines(self, l0, l1):
        """Yield (index, original line) for indices in [l0, l1)."""
        for lidx in range(l0, l1):
            yield lidx, self.L[lidx]

    def enumPLines(self, l0, l1):
        """Yield (index, processed line) for indices in [l0, l1)."""
        for lidx in range(l0, l1):
            yield lidx, self.P[lidx]

    def getLine(self, lidx):
        """Return the original line at *lidx*."""
        return self.mLines[lidx]

    def getLines(self, l0, l1):
        """Return the original lines [l0, l1)."""
        return self.mLines[l0:l1]

    def getPLine(self, lidx):
        """Return the processed line at *lidx*."""
        return self.mPLines[lidx]

    def getPLines(self, l0, l1):
        """Return the processed lines [l0, l1)."""
        return self.mPLines[l0:l1]

    def lines(self):
        """Return the full list of original lines."""
        return self.mLines

    def plines(self):
        """Return the full list of processed lines."""
        return self.mPLines

    def emitParseError(self, lidx, msg):
        """Raise ASLParseError for line *lidx*, showing the original (not processed) line."""
        raise ASLParseError(f'Error parsing {self.mFilename}:{lidx+1} : {self.L[lidx]}\n{msg}\n')

    def getFilename(self):
        """Return the source filename this provider was built from."""
        return self.mFilename

class ASLGroup:
    '''A named ASL definition unit (Group, Optype or Opcode) with member data.'''

    def __init__(self, name:str, gtype:str, data:dict, metadata:dict=None):
        '''
        gtype: {'Group','Optype','Opcode'}  # Group > Optype > Opcode
        data: {member0:content0, member1:content1, ...}
        metadata: {'inherit':[g0, g1], 'def': ({fname},{line}), ...}
        '''
        self.mName = name
        self.mType = gtype
        self.mData = data
        self.mMetadata = metadata

    def __str__(self):
        parts = [
            f'ASLGroup(name={self.mName}, type={self.mType})\n',
            '  Metadata:\n' + str(self.mMetadata) + '\n',
            '  Data:\n' + str(self.mData) + '\n',
        ]
        return ''.join(parts)

    def toDict(self):
        '''Serialize to a plain dict (suitable for JSON dumping).'''
        return {'class':'ASLGroup', 'name':self.mName, 'type':self.mType, 'metadata':self.mMetadata, 'data':self.mData}

    def getDef(self):
        '''Return "filename:line" of the definition site, or '' if unknown.'''
        try:
            fdef = self.mMetadata['def']
        except KeyError:
            return ''
        return f'{fdef[0]}:{fdef[1]}'

class ASLBitFieldType:
    '''Declared type of an encoding bitfield.

    Enum/Prop types carry an explicit key<->value mapping filled via push();
    other (immediate) types accept '0x...' hex literals bounded by bitwidth.
    '''

    def __init__(self, name:str, bitwidth:int, metadata:dict, vtype='Enum'):
        ''' vtype: {'Enum', 'Prop', 'UImm', 'SImm', 'F32Imm', 'F64Imm', 'F16Imm', 'ReqSB'}

        '''
        self.mName = name
        self.mBitWidth = bitwidth
        self.mMetadata = metadata

        self.mVType = vtype
        self.mMaxV = (1 << bitwidth) - 1
        self.mKV = {}  # key -> value
        self.mVK = {}  # value -> key (reverse map, for conflict detection)

    def push(self, k:str, v:int, *, return_exception=False):
        '''Register enum key *k* with value *v*.

        Errors (duplicate key, duplicate value, out-of-range value, or a
        non-Enum/Prop type) are raised, or returned when return_exception=True.
        Returns None on success.
        '''
        err = None
        if self.mVType not in {'Enum', 'Prop'}:
            err = TypeError(f'Only Enum/Prop type BitFieldType can be pushed value! name={self.mName}, vtype={self.mVType}')
        elif not (0 <= v <= self.mMaxV):
            err = ValueError(f'Error! Out of range value {k}={v:#x} within bitwidth {self.mBitWidth}!')
        elif k in self.mKV:
            err = KeyError(f'Error! Duplicate key {k}={v:#x} !!! Prev val={self.mKV[k]:#x}')
        elif v in self.mVK:
            err = ValueError(f'Error! Conflict value {k}={v}!!! Prev key={self.mVK[v]}')

        if err is not None:
            if return_exception:
                return err
            raise err

        self.mKV[k] = v
        self.mVK[v] = k
        return None

    def __setitem__(self, k:str, v:int):
        self.push(k, v)

    def __getitem__(self, k:str):
        if self.mVType in {'Enum', 'Prop'}:
            return self.mKV[k]
        return self.getImmVal(k)

    def getImmVal(self, k:str):
        # TODO: get hex value according to input imm
        return int(k, 16) if k.startswith('0x') else None

    def __contains__(self, k):
        if self.mVType in {'Enum', 'Prop'}:
            return k in self.mKV
        if not k.startswith('0x'):
            return False
        return int(k, 16) <= self.mMaxV

    def __str__(self):
        parts = [f'ASLBitFieldType(name={self.mName}, bitwidth={self.mBitWidth}, vtype={self.mVType},\n  metadata={self.mMetadata})\n']
        if self.mVType in {'Enum', 'Prop'}:
            parts.append('  Enum values : \n')
            parts.extend(f'    {k} = {v:#x};\n' for k, v in self.mKV.items())
        return ''.join(parts)

    def getDef(self):
        '''Return "filename:line" of the definition site, or '' if unknown.'''
        if 'def' not in self.mMetadata:
            return ''
        fdef = self.mMetadata['def']
        return f'{fdef[0]}:{fdef[1]}'

    def toDict(self):
        return {'class':'ASLBitFieldType', 'name':self.mName, 'bitwidth':self.mBitWidth, 'metadata':self.mMetadata, 'vtype':self.mVType, 'KV':self.mKV}

class ASLBitField:
    def __init__(self, field_range, ftype, fvar, fval=None, src=None):
        '''
            field_range: [(bv0, bvn), (bv1, bvn), ...]
            ftype: field type, must be ASLBitFieldType
            fvar: field var name
            fval: field value set,
                None/'' for not set;
                '=val' for setting default value;
                '==val' for setting constant value;
            src: the group field declared, for tracing source of bitfield.
        '''
        self.mRange = field_range
        self.mType = ftype
        self.mVar = fvar
        self.mVal = fval
        self.mSrc = src
        # NOTE: always update mSetBits when range is changed. Use property???
        self.mSetBits = self.getSetBits()

    def sameField(self, bf:"ASLBitField"):
        '''Same bitfield definition, but may have different value or source.'''
        return (self.mSetBits, self.mType, self.mVar) == (bf.mSetBits, bf.mType, bf.mVar)

    def getValueLevel(self):
        '''0 = not set, 1 = default ("=val"), 2 = constant ("==val").'''
        if not self.mVal:
            return 0
        return 2 if self.mVal.startswith('==') else 1

    def canOverrideValue(self, bf:"ASLBitField"):
        ''' Check whether current bit field can override input bitfield value.

        Value strength: NotSet(0) < Default(1) < Constant(2). A stronger value
        overrides a weaker one; equal non-constant levels may merge; two
        constants can never override each other.
        '''
        other = bf.getValueLevel()
        mine = self.getValueLevel()
        if mine > other:       # self stronger, override weaker
            return True
        return mine == other and mine < 2   # merge same non-constant level

    def getBitWidth(self):
        '''Total number of bits covered by all range segments.'''
        return sum(vn for _, vn in self.mRange)

    def getSetBits(self):
        '''Bitmask with a 1 at every position covered by the range segments.'''
        mask = 0
        for v0, vn in self.mRange:
            mask += ((2**vn)-1) << v0
        return mask

    def toDict(self):
        return {'class':'ASLBitField', 'range':self.mRange, 'type':self.mType, 'var':self.mVar, 'val':self.mVal, 'src':self.mSrc}

    def __repr__(self):
        return str(self)

    def __str__(self):
        return f'ASLBitField(Range={self.mRange}, Type={self.mType}, Var={self.mVar}, Val="{self.mVal}", Src={self.mSrc})'

class BitFieldMergeError(Exception):
    """Raised by ASLEncoding.merge when bitfields conflict and cannot be merged."""
    pass

class ASLEncoding:
    def __init__(self, name, metadata=None):
        '''Create an empty encoding named *name*.

        mFields maps var name -> bitfield; mSetBits is the union bitmask of all
        field ranges; mFieldConstants/mFieldDefaults are populated later by
        fillValues().
        '''
        self.mName = name
        self.mFields = {}
        self.mSetBits = 0
        self.mMetadata = {} if metadata is None else metadata

        self.mFieldConstants = {}
        self.mFieldDefaults = {}

    def push(self, bf:ASLBitField, *, return_exception=False):
        '''Add bitfield *bf* to this encoding.

        Errors on a duplicate var name or overlapping bit range; the exception
        is returned instead of raised when return_exception=True. Returns None
        on success.
        '''
        var = bf.mVar
        err = None
        if var in self.mFields:
            err = KeyError(f'Redefined var {var}!')
        elif bf.mSetBits & self.mSetBits:
            err = ValueError(f'Overlapped bit field range {var}!')

        if err is not None:
            if return_exception:
                return err
            raise err

        self.mFields[var] = bf
        self.mSetBits |= bf.mSetBits
        return None

    def sortFields(self):
        '''Reorder mFields by (lowest start bit, field name), ascending.'''
        ordering = sorted(self.mFields, key=lambda n: (self.mFields[n].mRange[0][0], n))
        self.mFields = {n: self.mFields[n] for n in ordering}

    def merge(self, enc:"ASLEncoding"):
        '''Merge bitfields from another ASLEncoding.

        For a field name present in both encodings the definitions must match
        (sameField) and the existing field's value level must allow overriding
        the incoming one; otherwise BitFieldMergeError is raised.
        '''

        emsg = f'Error when merging encoding {enc.mName}({enc.getDef()}) into {self.mName}({self.getDef()})!\n'
        for bfname, bf in enc.mFields.items():
            if bfname in self.mFields: # same varname found
                cbf = self.mFields[bfname]
                if not bf.sameField(cbf):
                    # Same name but different range/type/var: unmergeable.
                    raise BitFieldMergeError(f'{emsg}Conflict bit field var {cbf.mVar}!\n  Dst : {cbf}\n  Src : {bf}\n')
                else:
                    if not cbf.canOverrideValue(bf):
                        raise BitFieldMergeError(f'{emsg}Invalid to override input field values for {cbf.mVar}!\n  Dst : {cbf}\n  Src : {bf}\n')
                    elif cbf.mSrc==bf.mSrc:  # Ignore field inherit from same source
                        pass
                    else:
                        # NOTE(review): this assigns the EXISTING field (cbf)
                        # back, so the incoming bf is dropped and the statement
                        # is effectively a no-op; the "Overriding" message is
                        # misleading if keeping cbf is the intent — confirm.
                        print(f"Overriding field {cbf.mVar}!\n  Dst : {cbf}\n  Src : {bf}\n")
                        self.mFields[bfname] = cbf
            else:
                if bf.mSetBits & self.mSetBits > 0:
                    # New name but bits already claimed by another field.
                    raise BitFieldMergeError(f'{emsg}Overlapping bit field from {bf.mVar}!\n ')
                self.mFields[bfname] = bf

            self.mSetBits = self.mSetBits | bf.mSetBits

    def fillValues(self, repos):
        '''Resolve field values via the bitfield-type repository *repos*.

        Fields with a constant value ("==val") go to mFieldConstants, those
        with a default value ("=val") go to mFieldDefaults. Unknown types,
        unknown values, or unresolvable values raise Exception.
        '''
        self.mFieldConstants = {}
        self.mFieldDefaults = {}

        msg = f"{self.getDef()} : "
        for bfname, bf in self.mFields.items():
            vl = bf.getValueLevel()
            if vl == 0:
                continue  # no value attached to this field

            if bf.mType not in repos:
                raise Exception(msg + f'Error! Unknown bitfield type {bf.mType} used in {bf}!!!')
            bftype = repos[bf.mType]

            sval = bf.mVal.lstrip('=')  # strip leading "=="/"="
            if sval not in bftype:
                raise Exception(msg + f'Error! Unknown bitfield value {sval} used in {bf}!!! \n{bftype.getDef()} Available KV: {bftype.mKV}')

            val = bftype[sval]
            if val is None:
                raise Exception(msg + f'Error! Cannot assign value to bitfield {bf}!!! {bftype.getDef()}')

            if vl == 2:
                self.mFieldConstants[bfname] = val
            else:
                self.mFieldDefaults[bfname] = val

    def draw(self, save_name=None, title=None):
        """Render the 128-bit encoding layout as two 64-bit rows via matplotlib.

        Constant fields are filled with their binary digits, unused bit ranges
        are greyed out, and field names too long for their box are replaced by
        circled-number markers listed in a legend below the chart. Saves to
        *save_name* when given, otherwise shows the figure interactively.
        """
        if save_name is not None:
            print(f'Drawing {save_name}...')

        fig = plt.figure(16807, figsize=(16, 2))
        plt.clf()
        ax = fig.subplots(1, 1, sharex=True, sharey=True,
                        subplot_kw=dict(aspect=1),
                        gridspec_kw=dict(hspace=0, wspace=0))

        # Tick labels count DOWN (64..0 top, 128..64 bottom): x axis is mirrored.
        xticks = list(range(0, 65, 8))
        xticklabels = [str(i) for i in xticks]
        xticklabels.reverse()
        ax.set(xticks=xticks, xticklabels=xticklabels, yticks=[], xlim=[0, 64], ylim=[0, 3])
        ax.tick_params(top=True, labeltop=True, bottom=False, labelbottom=False)
        # ax.tick_params(top=False, labeltop=False, bottom=True, labelbottom=True)
        # subplot(111, subplot_kw=dict(aspect=1), gridspec_kw=dict(hspace=0, wspace=0))

        # Per-bit ruler lines in the header band (heavier line every byte).
        for i in range(1, 64):
            lw = 1 if i%8==0 else 0.5
            ax.axvline(i, ymin=2/3, ymax=1, linewidth=lw, color='k')

        ax.axvline(0, ymin=0, ymax=1, linewidth=0.5, color='k')
        ax.axvline(64, ymin=0, ymax=1, linewidth=0.5, color='k')

        ax.axhline(0, linewidth=0.5, color='k')
        ax.axhline(1, linewidth=0.5, color='k')
        ax.axhline(2, linewidth=1, color='k')
        ax.axhline(3, linewidth=1, color='k')

        # draw bit position
        for i in range(64):
            ax.text(i+0.5, 2.5, f'{7-(i%8)}', horizontalalignment='center', verticalalignment='center')

        # Lower-row byte labels (128 down to 64) with small tick marks.
        for i in range(0, 65, 8):
            ax.text(i, -0.65, f'{128-i}', horizontalalignment='center', verticalalignment='center')
            l0 = Line2D([i, i], [-0.2,0], lw=0.75, color='k')
            l0.set_clip_on(False)
            ax.add_line(l0)

        rep_labels = {}  # field name -> marker index, for over-wide names
        # ['Calibri', 'Cambria', 'Yu Gothic', ]
        #             00000000000000000000
        rep_marker = '①②③④⑤⑥⑦⑧⑨⑩⑪⑫⑬⑭⑮⑯⑰⑱⑲⑳'
        font_dict = {'fontsize':12, 'color':'k', 'family':['Consolas']} # Consolas

        # Grey out every bit range not covered by any field.
        zlist = getUnusedBitsList(self.mSetBits)
        # print(zlist)
        for z0, z1 in zlist:
            x0 = 64-(z0%64)
            x1 = 63-((z1-1)%64)
            yp = 1 - z0//64
            ax.fill([x0, x0, x1, x1], [yp, yp+1, yp+1, yp], 'darkgrey')

        # TODO: split field spanning 2 lines. Done！
        # TODO: discontinuous fields? Done!
        def splitVal(val, vns):
            '''Splitting values into a set of bitfields'''
            acc_vn = vns[0]
            masks = [((1<<acc_vn) - 1, 0)]
            for vn in vns[1:]:
                masks.append( ( ((1<<vn)-1)<<acc_vn , acc_vn) )
                acc_vn += vn

            if val is not None:
                return tuple([(val & m)>>shf for m,shf in masks])
            else:
                return tuple([None]*len(vns))

        # Flatten fields into (start, width, name, constant-value-or-None),
        # splitting any segment that crosses the 64-bit row boundary.
        bflist = [] # (v0, vn, var, val)
        for bfname, bf in self.mFields.items():
            var = bfname
            if var in self.mFieldConstants:
                val = self.mFieldConstants[var]
            else:
                val = None

            vs = []
            for v0, vn in bf.mRange:
                v1 = v0 + vn - 1
                if v0<64 and v1>=64:
                    vs.append((v0, 64-v0))
                    vs.append((64, v1-63))
                else:
                    vs.append((v0, vn))

            vals = splitVal(val, [vn for _, vn in vs])
            for (v0, vn), val in zip(vs, vals):
                bflist.append((v0, vn, var, val))

        for v0, vn, var, val in bflist:
            ve = v0 + vn
            # Upper row (y=1..2) holds bits 0..63, lower row (y=0..1) bits 64..127.
            if v0<64:
                vp = v0
                yp = 1.5
            else:
                vp = v0 - 64
                yp = 0.5

            xp = vp + vn/2

            # Field boundary lines (x is mirrored: 64-x upper, 128-x lower).
            if v0<64:
                ax.axvline(64-v0, 1/3, 2/3, linewidth=0.5, color='k')
            else:
                ax.axvline(128-v0, 0, 1/3, linewidth=0.5, color='k')

            if ve<64:
                ax.axvline(64-ve, 1/3, 2/3, linewidth=0.5, color='k')
            else:
                ax.axvline(128-ve, 0, 1/3, linewidth=0.5, color='k')

            # fill value if it's constant
            if val is not None:
                bval = ('{v:0' + str(vn) +'b}').format(v=val)[::-1]
                for bv, iv in zip(bval, range(vn)):
                    ax.text(64-(vp+iv+0.5), yp, bv, horizontalalignment='center', verticalalignment='center')
                continue

            # Name too wide for its box: draw a circled-number marker instead
            # and record it for the legend below the chart.
            if len(var)*0.49>vn:
                if var in rep_labels:
                    repid = rep_labels[var]
                else:
                    repid = len(rep_labels)
                    rep_labels[var] = repid

                ax.text(64-xp, yp, rep_marker[repid], horizontalalignment='center', verticalalignment='center', fontdict=font_dict)
            else:
                ax.text(64-xp, yp, var, horizontalalignment='center', verticalalignment='center', fontdict=font_dict)

        # draw rep labels
        xp = 0
        yp = -1.5
        for var, repid in rep_labels.items():
            m = rep_marker[repid] + '=' + var
            ax.text(xp, yp, m, horizontalalignment='left', verticalalignment='center', fontdict=font_dict)
            xp += len(m)*0.4 + 1
            if xp>63:  # wrap legend to the next line
                xp = 0
                yp -= 1

        if title is None:
            title = self.mName
        plt.title(title)
        plt.tight_layout()
        if save_name is not None:
            plt.savefig(save_name, dpi=180)
        else:
            plt.show()

    def checkBitWidth(self, repos):
        '''Verify every field's width matches its declared type in *repos*.

        Raises ASLPostProcessError on an unknown type or a width mismatch.
        '''
        for bfname, bf in self.mFields.items():
            if bf.mType not in repos:
                emsg = f'Unknown BitFieldType {bf.mType}!'
                emsg += f'  Encoding {self.mName} defined in: {self.getDef()}\n'
                raise ASLPostProcessError(emsg)

            declared = repos[bf.mType].mBitWidth
            actual = bf.getBitWidth()
            if declared != actual:
                emsg = 'Encoding bitwidth not match!\n'
                emsg += f'  Type {bf.mType} has bitwidth {declared}. Def: {repos[bf.mType].getDef()}\n'
                emsg += f'  Var {bfname} has bitwidth {actual}. Def: {self.getDef()}\n'
                raise ASLPostProcessError(emsg)

    def checkAssociatedFields(self):
        '''Validate "var.op" field names: exactly one dot, known var, known op.

        Raises ASLPostProcessError on malformed names, unknown base vars, or
        operations not in ASSOCIATED_ATTRS.
        '''
        for bf in self.mFields:
            if '.' not in bf:
                continue  # plain field, nothing to check

            vname, _, opname = bf.partition('.')
            if '.' in opname:  # more than one dot
                raise ASLPostProcessError(f'Invalid associated field name "{bf}" in {self.getDef()}!')

            if vname not in self.mFields:
                raise ASLPostProcessError(f'Unknown associated var "{vname}" for "{bf}" in {self.getDef()}!')
            if opname not in ASSOCIATED_ATTRS:
                raise ASLPostProcessError(f'Unknown associated operation "{opname}" for "{bf}" in {self.getDef()}!')

    def enumerateOpModi(self, operand_info:"ASLOperandInfo", bftypes:dict):
        '''Yield every "optype[.modi[.modi...]]" string this encoding allows.

        Fields named in operand_info's Order list, associated fields (names
        containing '.'), and the bookkeeping fields below are excluded; every
        remaining field contributes its enum keys as dot-joined modifiers.
        '''
        # Collect operand names from Order; for list entries, the items after
        # the first are added as operand names.
        opr_set = set()
        for v in operand_info.mPData['Order']:
            if isinstance(v, list):
                opr_set.update(v[1:])
            else:
                opr_set.add(v)

        # Bookkeeping fields never become modifiers.
        opr_set.update(['optype', 'stype', 'req', 'wsb', 'sched', 'trap'])
        opmodis = []

        # TODO: modi_order
        if 'ModiOrder' in operand_info.mPData:
            opmodis.extend(operand_info.mPData['ModiOrder'])

        for bf, bfv in self.mFields.items():
            if bf in opr_set or '.' in bf or bf in opmodis:
                continue
            else:
                opmodis.append(bf)

        # For each modifier field build its selectable key list:
        #   default value  -> the default key is removed and '' (no modifier)
        #                     is offered first;
        #   constant value -> the single fixed key only.
        opmodi_klist = []
        for bf in opmodis:
            bfv = self.mFields[bf]
            bftype = bfv.mType
            klist = list(bftypes[bftype].mKV.keys())
            if bfv.getValueLevel()==1:
                klist.remove(bfv.mVal.strip('= '))
                klist.insert(0, '')
            elif bfv.getValueLevel()==2:
                klist = [bfv.mVal.strip('= ')]

            opmodi_klist.append([('.'+k) if len(k)!=0 else '' for k in klist])

        optype = self.mFields['optype'].mVal.strip('= ')
        if len(opmodi_klist) == 0:
            yield optype
        else:
            # Cartesian product over all modifier choices.
            for ks in itertools.product(*opmodi_klist):
                yield optype + (''.join(ks))

    def enumerateFunc(self, operand_info:"ASLOperandInfo", excep:"ASLException", bftypes:dict):
        '''Yield (encoded_value, func_tag) for every func variant of this encoding.

        Enumerates all combinations of modifier-field values (plus function
        immediate operands listed in FUNC_IMM_BFS), sums them into a numeric
        key on top of the optype (and stype for OPTYPES_FUNC_WITH_STYPE), and
        filters each combination through checkEncodingError. Invalid
        combinations are yielded as (None, reason-string).
        '''
        optype = self.mFields['optype'].mVal.strip(' =')

        # Collect operand names from Order; operands that are function
        # immediates for this optype are remembered separately.
        opr_set = set()
        func_imm_bf = []
        for v in operand_info.mPData['Order']:
            if isinstance(v, list):
                opr_set.update(v[1:])
            else:
                opr_set.add(v)
                if f'{optype}.{v}' in FUNC_IMM_BFS:
                    func_imm_bf.append(v)

        # Bookkeeping fields never become func modifiers.
        opr_set.update(['optype', 'stype', 'req', 'wsb', 'sched', 'trap'])
        func_modis = []

        # TODO: modi_order
        if 'ModiOrder' in operand_info.mPData:
            func_modis.extend(operand_info.mPData['ModiOrder'])

        # Associated fields ('.' in name) are skipped, except for the
        # ADDRMode/RStride types which do take part in func enumeration.
        for bf, bfv in self.mFields.items():
            if bf in opr_set or ('.' in bf and bfv.mType not in {'ADDRMode', 'RStride'}) or bf in func_modis:
                continue
            else:
                func_modis.append(bf)
        func_modis.extend(func_imm_bf)

        # TODO: BMOV_R_B/BMOV_B_R ???
        # Per modifier: klist holds (field, display-tag) choices and vdict maps
        # each tag to (value, bit-position) for summing the encoded key.
        opfunc_klists = []
        opfunc_vdicts = []
        for bf in func_modis:
            bfv = self.mFields[bf]
            bftype_name = bfv.mType
            bftype = bftypes[bftype_name]
            if bftype.mVType == 'Enum':
                if bfv.mType  in {'ADDRMode', 'RStride'}:
                    klist = [(bf, f'({bf}={k})') for k in bftype.mKV]
                    vdict = {f'({bf}={k})':(v, bfv.mRange[0][0]) for k,v in bftype.mKV.items()}
                else:
                    klist = [(bf, k) for k in bftype.mKV]
                    vdict = {k:(v, bfv.mRange[0][0]) for k,v in bftype.mKV.items()}
            else:
                # Non-enum (immediate) field: enumerate every raw value.
                klist = [(bf, f'{bf}={k:#x}') for k in range(bftype.mMaxV+1)]
                vdict = {f'{bf}={v:#x}':(v, bfv.mRange[0][0]) for v in range(bftype.mMaxV+1)}

            opfunc_klists.append(klist)
            opfunc_vdicts.append(vdict)

        v_optype = self.mFieldConstants['optype']
        if optype in OPTYPES_FUNC_WITH_STYPE:
            v_optype += self.mFieldConstants['stype']<<8

        # TODO: VSel/HSel/BSel are not in func modi list, but may appear in exception expressions
        #       Thus just set them to default value as "0" here, usually safe.
        bfv_basedict = copy.deepcopy(self.mFieldConstants)
        for bf, bfv in self.mFields.items():
            if bfv.mType in {'VSel', 'HSel', 'BSel'}:
                bfv_basedict[bf] = 0

        optype = self.mFields['optype'].mVal.strip('= ')
        if len(opfunc_klists) == 0:  # No extra field, only optype
            yield v_optype, optype
        else:
            for ks in itertools.product(*opfunc_klists):
                vsum = v_optype
                bfv_dict = copy.deepcopy(bfv_basedict)
                for i, (bf, ktag) in enumerate(ks):
                    vd = opfunc_vdicts[i]
                    bfval, vpos = vd[ktag]
                    bfv_dict[bf] = bfval
                    vsum += bfval << vpos

                # Reject combinations that trip an encoding error rule.
                flag, msg = self.checkEncodingError(bfv_dict, operand_info, excep, bftypes, check_modi_only=True)
                func_tag = optype + '.' + ('.'.join([ktag for _, ktag in ks]))
                if not flag:
                    yield None, f'{func_tag}. Reason = {msg}'
                else:
                    yield vsum, func_tag

    def checkEncodingError(self, bfval_dict:dict, operand_info:"ASLOperandInfo", excep:"ASLException", bftypes:dict, maxreg=254, *, check_modi_only=False):
        '''Validate one concrete field assignment against the encoding rules.

        bfval_dict   : field name -> integer value for every encoding field
        operand_info : parsed __OperandInfo (supplies operand bitwidths)
        excep        : parsed __Exception (EncodingError condition stacks)
        bftypes      : bitfield-type repository, type name -> ASLBitFieldType
        maxreg       : highest usable GPR index (0xff is RZ, the zero register)
        check_modi_only : stop after the constant/field-combination checks,
                          skipping the per-operand checks below

        Returns (True, 'No error found!') on success, else (False, reason).
        '''
        # check constant fields "BFType var == Val"
        for k,v in self.mFieldConstants.items():
            if bfval_dict[k] != v:
                return False, f'Unmatched encoding constant field "{k}=={v:#x}" : got {k}={bfval_dict[k]:#x}!'

        # check field comb
        for ename, elist in excep.mPData['EncodingError'].items():
            slist = excep.mData['EncodingError'][ename]
            for i, (esrc, reason, estack) in enumerate(elist):
                # estack is the parsed RPN stack; estr is the original source expr
                _, _, estr  = slist[i]
                if evalExpr(estack, bfval_dict):
                    return False, f'{ename}! Reason: {reason}, Expr = {estr}, Src = {esrc}.'

        # just check modifiers, for enumerating valid funcs.
        # TODO: sometimes field comb may also need operands?
        if check_modi_only:
            return True, "No error found!"

        bw_dict =  operand_info.evalBitwidth(bfval_dict)
        # register length in 32-bit units, rounded up
        rlen_dict = {k:(v+31)//32 for k,v in bw_dict.items() }

        ldc_optype_set = {bftypes['Optype'].mKV['LDC'], bftypes['Optype'].mKV['ULDC']}

        # Check operand correctness
        for bf, bfv in bfval_dict.items():
            bftype = self.mFields[bf].mType

            # check GPR/UGPR index in range and alignment
            if bftype == 'Reg' and bfv != 0xff: # 0xff = RZ
                if bfv + rlen_dict[bf]-1 > maxreg:
                    return False, f"Out of range GPR index R[{bfv}:{bfv+rlen_dict[bf]-1}] vs maxreg={maxreg}!"
                if bfv % rlen_dict[bf] != 0:
                    return False, f"Misaligned GPR index R[{bfv}:{bfv+rlen_dict[bf]-1}] with len {rlen_dict[bf]}!"

            elif bftype == 'UReg':
                # NOTE(review): unlike 'Reg', UReg gets no max-index or
                # RZ(0xff) special case here — confirm this is intended.
                if bfv % rlen_dict[bf] != 0:
                    return False, f"Misaligned UGPR index UR[{bfv}:{bfv+rlen_dict[bf]-1}] with len {rlen_dict[bf]}!"

            # check constant memory alignment (LDC/ULDC are exempt)
            elif bftype == 'CMem' and bfval_dict['optype'] not in ldc_optype_set:
                if bfv % (rlen_dict[bf]*4) != 0:
                    return False, f"Misaligned CMem immediate {bfv:#x} with {rlen_dict[bf]*4}B!"

            # check field value
            if bftypes[bftype].mVType == 'Enum':
                if bfv not in bftypes[bftype].mVK:
                    return False, f"Invalid field value {bf} = {bfv:#x}!"

        return True, 'No error found!'

    def toDict(self):
        """Serialize this encoding into a plain dict (class tag included)."""
        return {
            'class': 'ASLEncoding',
            'name': self.mName,
            'metadata': self.mMetadata,
            'fields': self.mFields,
            'field_constants': self.mFieldConstants,
            'set_bits': hex(self.mSetBits),
        }

    def __repr__(self):
        """repr mirrors the human-readable str form."""
        return self.__str__()

    def __str__(self):
        """Render the encoding as its 128-bit set-bits mask plus one line per field."""
        mask = f'{self.mSetBits:0128b}' # TODO: var length?
        field_lines = '\n  '.join(str(bf) for _, bf in self.mFields.items())
        return f'ASLEncoding:{self.mName}: {mask} \n  ' + field_lines

    def getDef(self):
        """Return the 'file:line' definition site from metadata, or '' if unknown."""
        fdef = self.mMetadata.get('def')
        if fdef is None:
            return ''
        return f'{fdef[0]}:{fdef[1]}'

class OperandInfoParseError(Exception):
    """Raised when an __OperandInfo section cannot be parsed."""

class ASLOperandInfo:
    """Parsed "__OperandInfo" member of an op group.

    mData  : raw attribute values as parsed from the ASL source (strings for
             list attrs, {obj: value-string} dicts for dict attrs).
    mPData : post-processed values produced by parseInfo(): token lists for
             InList/OutList/Order/ModiOrder, RPN value stacks for Bitwidth
             expressions, and (func, var, modi) tuples for AsmFormat.
    """

    def __init__(self, name, metadata=None):
        self.mName = name
        if metadata is not None:
            self.mMetadata = metadata
        else:
            self.mMetadata = {}

        self.mData = {}   # raw parsed attributes
        self.mPData = {}  # post-processed attributes (see parseInfo)

    def parseInfo(self, enc:ASLEncoding, repos:dict):
        """Post-process raw mData into mPData, validated against encoding `enc`.

        repos is the bitfield-type repository (type name -> ASLBitFieldType).
        Raises OperandInfoParseError on malformed content.
        """
        self.mPData = {}
        for attr, vstr in self.mData.items():
            if attr in OPERANDINFO_LISTS:
                self.mPData[attr] = self.__parseList(attr, vstr, enc)
            elif attr=='Bitwidth':
                # each entry is an expression evaluated later per field-value dict
                d = {}
                for k, v in self.mData['Bitwidth'].items():
                    d[k] = self.__parseExpr(attr, v, enc, repos)
                self.mPData[attr] = d
            elif attr=='AsmFormat': # only for AsmFormat
                p = {}
                for k,s in vstr.items():
                    res=p_asmformat.match(s)
                    if res is None:
                        raise OperandInfoParseError(f'Invalid asm format string "{s}" ! {self.getDef()}')

                    func, var, modi = res.group('Func'), res.group('Var'), res.group('Modi')
                    if func not in ASMFORMAT_FUNCS:
                        raise OperandInfoParseError(f'Invalid asm format func "{func}" ! {self.getDef()}. Valid list: {ASMFORMAT_FUNCS} ')
                    if var not in enc.mFields:
                        raise OperandInfoParseError(f'Undefined asm format var "{var}" ! {self.getDef()}.')

                    if func != 'CvtIShift':
                        if modi not in enc.mFields:
                            raise OperandInfoParseError(f'Undefined asm format modi "{modi}" ! {self.getDef()}.')
                    else:
                        # CvtIShift takes a literal shift amount, not a bitfield name
                        try:
                            modi = int(modi)
                        except ValueError:
                            raise OperandInfoParseError(f'Invalid IShift value "{modi}"! {self.getDef()}.')
                    p[k] = func, var, modi

                self.mPData[attr] = p
            else:
                # e.g. PlainText — passed through untouched
                self.mPData[attr] = vstr

    def __parseList(self, attr, tstr:str, enc:ASLEncoding):
        """Parse a comma-separated operand list into tokens.

        Each element is a bare name, a group subscript like M[a, b] (stored as
        ['M', 'a', 'b']), or a dotted attribute (stored as 'a.b').
        """
        if tstr is None:
            return []
        tstr = tstr.strip()
        if len(tstr)==0:
            return []

        # wrap in [] so the whole string parses as one Python list literal
        t = ast.parse('[' + tstr + ']', mode='eval')
        tlist = []
        for elt in t.body.elts:
            if isinstance(elt, ast.Name):
                if elt.id not in enc.mFields and elt.id not in {'PR', 'UPR'}:
                    raise OperandInfoParseError(f'{self.getDef()} : Undefined operand ({elt.id}) in list {attr}<{tstr}>!')
                tlist.append(elt.id)
            elif isinstance(elt, ast.Subscript):
                sid = elt.value.id
                if sid not in ['M', 'R', 'UR', 'C']:
                    raise OperandInfoParseError(f'{self.getDef()} : Invalid group ({sid}) in list {tstr}!')
                s_elts = [e.id for e in elt.slice.elts]
                tlist.append([sid, *s_elts])
            elif isinstance(elt, ast.Attribute):
                sid = elt.value.id
                sattr = elt.attr
                tlist.append(sid+'.'+sattr)
            else:
                raise OperandInfoParseError(f'{self.getDef()} : Invalid ast type({type(elt)}) in operand list!')

        return tlist

    def __parseExpr(self, attr, tstr:str, enc:ASLEncoding, type_repos:dict):
        """Compile an attribute expression into an RPN stack for evalExpr().

        Names/constants are pushed as-is; binary ops are pushed postfix; a
        comparison `field == Enum` is translated to (field, enum_value, op).
        """
        if tstr is None or len(tstr.strip())==0:
            return []

        t = ast.parse(tstr, mode='eval')
        if not isinstance(t, ast.Expression):
            raise OperandInfoParseError(f'Invalid ast expr! {attr} = {tstr}')

        vstack = []
        def push_v(v, b_literal=False):
            if b_literal:
                vstack.append(v)
                return

            if isinstance(v, ast.Name):
                vstack.append(v.id)
            elif isinstance(v, ast.Constant):
                vstack.append(v.value)
            elif isinstance(v, ast.BinOp):
                push_v(v.left)
                push_v(v.right)
                vop = v.op.__class__.__name__
                if vop not in AST_OP_DICT:
                    raise OperandInfoParseError(f'{self.getDef()} : Invalid BinOp({vop}) in operand info expr! {attr} = {tstr}')
                vstack.append(AST_OP_DICT[vop])
            elif isinstance(v, ast.Compare):
                if isinstance(v.left, ast.Name):
                    lid = v.left.id
                elif isinstance(v.left, ast.Attribute):
                    lid = v.left.value.id + '.' + v.left.attr
                else:
                    raise OperandInfoParseError(f'{self.getDef()} : Only bitfield variable can be lhs of cmp expr! {attr} = {tstr}')
                #
                if lid not in enc.mFields:
                    raise OperandInfoParseError(f'{self.getDef()} : Unknown var {lid} in operand info expr! {attr} = {tstr}')

                vstack.append(lid)

                bftype = enc.mFields[lid].mType
                bftv = type_repos[bftype]

                # BUGFIX(consistency): mirror ASLException.parseExpr — chained
                # comparisons (a==b==c) were previously silently mis-parsed
                # (only comparators[0]/ops[0] were consulted).
                if len(v.comparators) > 1:
                    raise OperandInfoParseError(f'{self.getDef()} : Chain comparison is not supported! {attr} = {tstr}')

                cmp_v = v.comparators[0].value
                if cmp_v not in bftv.mKV:
                    raise OperandInfoParseError(f'{self.getDef()} : Unknown enum {cmp_v} for {bftype} ! ({bftv.getDef()})')
                vstack.append(bftv.mKV[cmp_v])

                cmp_op = v.ops[0].__class__.__name__
                if cmp_op not in AST_OP_DICT:
                    raise OperandInfoParseError(f'{self.getDef()} : Invalid CmpOp({cmp_op}) in operand info expr! {attr} = {tstr}')
                vstack.append(AST_OP_DICT[cmp_op])
            elif isinstance(v, ast.Attribute):
                vf = v.value.id
                vattr = v.attr
                vstack.append(vf + '.' + vattr)
            else:
                raise OperandInfoParseError(f'{self.getDef()} : Invalid expr op {v} in {attr} = {tstr}!')

        push_v(t.body)

        return vstack

    def checkAttr(self, enc:ASLEncoding):
        """Cross-check operand lists against the encoding's fields.

        Ensures InList/OutList/Order exist, reference only known operands, and
        cover every register/predicate field; register-like fields must also
        have a Bitwidth entry. Raises ASLPostProcessError on violation.
        """
        for k in {'InList', 'OutList', 'Order'}:
            if k not in self.mData:
                raise ASLPostProcessError(f'No attribute {k} found in OperandInfo! {self.getDef()}')

        ioset = set([*self.mData['InList'].split(','), *self.mData['OutList'].split(',')])
        if '' in ioset:  # remove empty list obj
            ioset.remove('')
        ordset = set()
        for v in self.mPData['Order']:
            if isinstance(v, list):
                ordset.update(v[1:])
            else:
                ordset.add(v)
        # print(ordset)

        for v in ordset:
            if v not in enc.mFields and v not in {'PR', 'UPR'}:
                raise ASLPostProcessError(f'Undefined operand "{v}" in {self.getDef()}!')

        for v in ioset:
            if v not in enc.mFields and v not in {'PR', 'UPR'}:
                raise ASLPostProcessError(f'Undefined operand "{v}" in {self.getDef()}!')

        for bfname, bfv in enc.mFields.items():
            if bfv.mType in {'Reg', 'UReg', 'Pred', 'UPred', 'BReg'}:
                if bfname not in ioset:
                    raise ASLPostProcessError(f'Unknown io for operand "{bfname}" in {enc.getDef()} for OperandInfo {self.getDef()}!')
                if bfname not in ordset:
                    raise ASLPostProcessError(f'Unknown order for operand "{bfname}" in {enc.getDef()} for OperandInfo {self.getDef()}!')

            if bfv.mType in {'Reg', 'UReg', 'CMem'}:
                if 'Bitwidth' not in self.mData or bfname not in self.mData['Bitwidth']:
                    raise ASLPostProcessError(f'Unknown bitwidth for operand "{bfname}" in {self.getDef()}!')

        vlist = []
        if 'Bitwidth' in self.mData:
            vlist.extend(self.mData['Bitwidth'].keys())
        if 'AsmFormat' in self.mData:
            vlist.extend(self.mData['AsmFormat'].keys())

        for v in vlist:
            if v not in enc.mFields:
                raise ASLPostProcessError(f'Undefined operand "{v}" in {self.getDef()}!')

    def merge(self, opinfo:"ASLOperandInfo"):
        '''merge operand info from parent op group'''

        for k, v in opinfo.mData.items():
            if k not in self.mData:
                self.mData[k] = v
            elif k in OPERANDINFO_DICTS:   # ignore overwritten InList/OutList/Order/ModiOrder
                for kk, kv in v.items():
                    if kk not in self.mData[k]:
                        self.mData[k][kk] = kv

    def checkModiOrder(self, enc:ASLEncoding, opv:ASLGroup, bftypes:dict):
        """Verify modifier ordering is unambiguous for opcode `opv`.

        If two fields share an enum key, both must appear in ModiOrder; a
        ModiOrder field must not carry a default value. Raises
        ASLPostProcessError on violation.
        """
        opr_set = set()
        for op in self.mPData['Order']:
            if isinstance(op, list):
                opr_set.update(op[1:])
            else:
                opr_set.add(op)

        # Fields that never participate in modifier ordering.
        # BUGFIX: 'sched' was previously misspelled 'shed', so the scheduling
        # field was not excluded here (cf. the same list in enumerateFunc).
        xset = set(['optype', 'stype', 'req', 'wsb', 'sched', 'trap'])
        xset.update(opr_set)

        vdict = {}
        if 'ModiOrder' in self.mPData:
            modis = set(self.mPData['ModiOrder'])
        else:
            modis = set()

        # if 2 fields has same ENUM, their order must be defined in ModiOrder
        for bf, bfv in enc.mFields.items():
            bftype = bftypes[bfv.mType]
            if bf not in xset and '.' not in bf:
                for k, _ in bftype.mKV.items():
                    if k in vdict:
                        if bf not in modis or vdict[k] not in modis:
                            raise ASLPostProcessError(f"Modi conflict found! {bf}@{k} vs {vdict[k]}@{k} for Opcode {opv.mName}! {opv.getDef()}")
                    else:
                        vdict[k] = bf

        for m in modis:
            if enc.mFields[m].getValueLevel()==1:
                raise ASLPostProcessError(f'Bitfield {m} defined in ModiOrder should not set default value! {enc.getDef()}')

    def evalBitwidth(self, bf_vdict:dict):
        """Evaluate every Bitwidth expression against a field-value dict.

        Returns {operand: bitwidth}; empty dict when no Bitwidth is defined.
        """
        d = {}
        if 'Bitwidth' not in self.mPData or len(self.mPData['Bitwidth']) == 0:
            return d

        for k, ops in self.mPData['Bitwidth'].items():
            d[k] = evalExpr(ops, bf_vdict)

        return d

    def toDict(self):
        """Serialize to a plain dict (class tag included)."""
        return {'class':'ASLOperandInfo', 'name':self.mName, 'metadata':self.mMetadata, 'data':self.mData, 'pdata':self.mPData}

    def __repr__(self):
        return str(self)

    def __str__(self):
        return f'ASLOperandInfo({self.mName})'

    def getDef(self):
        """Return the 'file:line' definition site from metadata, or ''."""
        if 'def' in self.mMetadata:
            fdef = self.mMetadata['def']
            return f'{fdef[0]}:{fdef[1]}'
        else:
            return ''

class ExceptionParseError(Exception):
    """Raised when an __Exception section cannot be parsed."""

class ASLException:
    def __init__(self, name, metadata=None):
        '''

        __Exception
            EncodingError<ErrorType> = ErrorCondExpr;
            RuntimeException<ExceptionType>;          // No Expr for RuntimeException

        List of ISA Exceptions:
            - EncodingError
                - IllegalOpcode
                - IllegalBitFieldValue
                - IllegalBitFieldCombination
                - OutOfRangeGPRIndex
                - UnalignedGPRIndex
                - UnalignedUGPRIndex
                - UnalignedCMemImmediate
                - IllegalWriteSB
                - IllegalSchedule
            - RuntimeException
                - IllegalPC
                - IllegalBRUState
                - InvalidBARCount
                - InconsistentBARMode
                - InconsistentBARCount
                - InvalidWARPSYNCMemberMask
                - InvalidRTTState
                - InvalidTRAPIndex
                - InvalidSYSCALLState
                - IllegalMemoryAddress
                - UnalignedMemoryAddress
                - InvalidConstBank
                - InvalidGPRIndex
                - InvalidUGPRIndex
                - InvalidHWRegisterIndex
                - InvalidSWITCHState
        '''
        self.mName = name
        if metadata is not None:
            self.mMetadata = metadata
        else:
            self.mMetadata = {}

        # list of (etype, ename, cond_expr)
        # 'EncodingError': {ename0: (err_src, err_reason, err_expr), ...}
        # 'RuntimeException': [ ename0, ename1, ...]
        self.mData = {'EncodingError':{}, 'RuntimeException':[]}

        # parsed form of mData, filled by parseData()
        self.mPData = {}

    def merge(self, excep:"ASLException"):
        """Merge exception data from a parent op group, de-duplicating entries."""
        for k, v in excep.mData['EncodingError'].items():
            if k in self.mData['EncodingError']:
                for vv in v:
                    if vv not in self.mData['EncodingError'][k]:
                        self.mData['EncodingError'][k].append(vv)
            else:
                self.mData['EncodingError'][k] = copy.deepcopy(v)

        for k in excep.mData['RuntimeException']:
            if k not in self.mData['RuntimeException']:
                self.mData['RuntimeException'].append(k)

    def parseData(self, enc:ASLEncoding, opgroups:dict, type_repos:dict):
        """Compile every EncodingError condition expression into an RPN stack.

        mPData mirrors mData, with each (src, reason, expr_string) entry
        replaced by (src, reason, value_stack).
        """
        self.mPData = {'EncodingError':{}}
        self.mPData['RuntimeException'] = self.mData['RuntimeException']

        for k, elist in self.mData['EncodingError'].items():
            self.mPData['EncodingError'][k] = []
            for esrc, reason, expr in elist:
                vstack = self.parseExpr(expr, esrc, enc, opgroups, type_repos)
                self.mPData['EncodingError'][k].append((esrc, reason, vstack))

    def parseExpr(self, tstr:str, esrc:str, enc:ASLEncoding, opgroups:dict, type_repos:dict):
        """Compile one condition expression into an RPN stack for evalExpr().

        Names/constants push their value; Bin/Bool/Unary ops push postfix; a
        comparison pushes (lhs field, rhs value/name, op). Raises
        ExceptionParseError on anything unsupported.
        """
        if tstr is None or len(tstr.strip())==0:
            return []

        def_str = opgroups[esrc].mData['__Exception'].getDef()

        try:
            t = ast.parse(tstr, mode='eval')
        except Exception:  # narrowed from bare except: keep Ctrl-C working
            raise ExceptionParseError(f'{def_str} : Invalid ast expr! {tstr}')

        # print(ast.dump(t, indent=2))

        if not isinstance(t, ast.Expression):
            raise ExceptionParseError(f'{def_str} : Invalid ast expr! {tstr}')

        vstack = []
        def push_v(v, b_literal=False):
            if b_literal:
                vstack.append(v)
                return

            if isinstance(v, ast.Name):
                vstack.append(v.id)
            elif isinstance(v, ast.Constant):
                vstack.append(v.value)
            elif isinstance(v, ast.BinOp):
                push_v(v.left)
                push_v(v.right)
                vop = v.op.__class__.__name__
                if vop not in AST_OP_DICT:
                    raise ExceptionParseError(f'{def_str} : Invalid BinOp({vop}) in exception expr! {tstr}')
                vstack.append(AST_OP_DICT[vop])
            elif isinstance(v, ast.BoolOp):
                vop = v.op.__class__.__name__
                if vop not in AST_OP_DICT:
                    raise ExceptionParseError(f'{def_str} : Invalid BoolOp({vop}) in exception expr! {tstr}')

                # and/or are n-ary in the AST: emit the op after every
                # operand past the first (left-fold in RPN)
                for i, bop_vs in enumerate(v.values):
                    push_v(bop_vs)
                    if i>0:
                        vstack.append(AST_OP_DICT[vop])

            elif isinstance(v, ast.UnaryOp):
                push_v(v.operand)
                vop = v.op.__class__.__name__
                if vop not in AST_OP_DICT:
                    raise ExceptionParseError(f'{def_str} : Invalid UnaryOp({vop}) in exception expr! {tstr}')
                vstack.append(AST_OP_DICT[vop])
            elif isinstance(v, ast.Compare):
                if isinstance(v.left, ast.Name):
                    lid = v.left.id
                elif isinstance(v.left, ast.Attribute):
                    lid = v.left.value.id + '.' + v.left.attr
                else:
                    raise ExceptionParseError(f'{def_str} : Only bitfield variable can be lhs of cmp expr! {tstr}')
                #
                if lid not in enc.mFields:
                    raise ExceptionParseError(f'{def_str} : Unknown var {lid} in exception expr! {tstr}')

                vstack.append(lid)

                bftype = enc.mFields[lid].mType
                bftv = type_repos[bftype]

                if len(v.comparators)>1:
                    raise ExceptionParseError(f'{def_str} : Chain comparison is not supported! {tstr}')
                cmp = v.comparators[0]
                if isinstance(cmp, ast.Constant):
                    cmp_v = v.comparators[0].value
                    if cmp_v not in bftv.mKV:
                        raise ExceptionParseError(f'{def_str} : Unknown enum {cmp_v} for {bftype} in {tstr}! ({bftv.getDef()})')
                    vstack.append(bftv.mKV[cmp_v])
                elif isinstance(cmp, ast.Name):
                    rid = cmp.id
                    vstack.append(rid)
                elif isinstance(cmp, ast.Attribute):
                    # BUGFIX: this branch previously tested
                    # isinstance(v.left, ast.Attribute) while reading
                    # cmp.value.id/cmp.attr, so an Attribute comparator was
                    # either missed or crashed with AttributeError.
                    rid = cmp.value.id + '.' + cmp.attr
                    vstack.append(rid)
                else:
                    raise ExceptionParseError(f'{def_str} : Unknown cmp {cmp} for {bftype} ! {tstr}')

                cmp_op = v.ops[0].__class__.__name__
                if cmp_op not in AST_OP_DICT:
                    raise ExceptionParseError(f'{def_str} : Unsupported CmpOp({cmp_op}) in exception expr! {tstr}')
                vstack.append(AST_OP_DICT[cmp_op])
            elif isinstance(v, ast.Attribute):
                vf = v.value.id
                vattr = v.attr
                vstack.append(vf + '.' + vattr)
            else:
                raise ExceptionParseError(f'{def_str} : Unsupported expr op {v} in {tstr}!')

        push_v(t.body)

        return vstack

    def toDict(self):
        """Serialize to a plain dict (class tag included)."""
        return {'class':'ASLException', 'name':self.mName, 'metadata':self.mMetadata, 'data':self.mData, 'pdata':self.mPData}

    def __repr__(self):
        return str(self)

    def __str__(self):
        return f'ASLException({self.mName})'

    def getDef(self):
        """Return the 'file:line' definition site from metadata, or ''."""
        if 'def' in self.mMetadata:
            fdef = self.mMetadata['def']
            return f'{fdef[0]}:{fdef[1]}'
        else:
            return ''

class ASLParseError(Exception):
    """Raised for malformed content while parsing an ASL input file."""

class ASLParser:
    """Line-oriented parser for ASL instruction-description files.

    Top-level sections (TOP_KEYS) and member sections (MEMBER_KEYS) are
    dispatched to the matching proc__* method via the mCallbacks table built
    in __init__; parsed results accumulate in mData keyed by definition name.
    Relies on module-level regexes (p_topkey_line, p_memberkey_line, p_field,
    ...) and ASLFileContentProvider, defined elsewhere in this file.
    """
    def __init__(self):
        # one callback per keyword: '__DefOpcode' -> self.proc__DefOpcode, etc.
        self.mCallbacks = {k:self.__getattribute__('proc'+k) for k in ALL_KEYS}
        self.mData = {}

    def reset(self):
        """Drop all previously parsed data."""
        self.mData = {}

    def pushData(self, kname, kdata, *, return_exception=False):
        """Store a parsed top-level item; a duplicate name is an error.

        With return_exception=True the KeyError is returned (or None on
        success) instead of raised, so the caller can attach file/line info.
        """
        if kname in self.mData:
            e = KeyError(f'Type "{kname}" redefined!')
        else:
            e = None

        if e is not None:
            if return_exception:
                return e
            else:
                raise e

        self.mData[kname] = kdata
        return e

    def parse(self, fname):
        ''' Parsing input file into ASL data dicts.

            NOTE:
                all key callback accepts (self, f:ASLFileContentProvider, l0, l1) as input,
                in which l0:l1=[l0, l1) is the line range.

                Example: (l0=4, l1=8) means line range [4, 8) = [4,5,6,7]

            NOTE:
                line range is counting from **0**, not **1**.

        '''
        self.mFileName = fname
        self.mF = ASLFileContentProvider(fname)

        # splitting lines with top keys
        # kidx collects (top_key, line_index) pairs in file order
        kidx = []
        for iline, pline in enumerate(self.mF.P):

            if len(pline)==0:
                continue

            r1 = p_topkey_line.match(pline)
            if r1 is not None:
                tkey = r1.group('KeyName')
                kidx.append((tkey, iline))

        for i, (tkey, iline) in enumerate(kidx):
            if i==len(kidx)-1:
                lend = len(self.mF.P) # until file end
            else:
                lend = kidx[i+1][1] # until next top key

            # process with corresponding callback
            kname, kdata = self.mCallbacks[tkey](self.mF, iline, lend)

            # only for skipping untreated types
            if kname is None or kname == '':
                continue

            # push Top-key data, emit error if any
            # TODO: check redefinition before parsing?
            # proc__DefImmBitFieldType may return lists of names/datas
            if isinstance(kname, list):
                for k, d in zip(kname, kdata):
                    e = self.pushData(k, d, return_exception=True)
                    if e is not None:
                        self.mF.emitParseError(iline, str(e))
            else:
                e = self.pushData(kname, kdata, return_exception=True)
                if e is not None:
                    self.mF.emitParseError(iline, str(e))

    def parseMembers(self, f:ASLFileContentProvider, l0, l1):
        """Split the line range [l0, l1) into member-key sections.

        Returns a list of (member_key, key_line, end_line) triples in file
        order, or None when no member keyword appears in the range.
        """
        # l0 should be first content line, not def line of top key
        mkidx = []
        for lidx in range(l0, l1):
            pline = f.P[lidx]
            res = p_memberkey_line.match(pline)
            if res is not None:
                kname = res.group('KeyName')
                mkidx.append((kname, lidx))

        if len(mkidx)==0:
            return None # No member contents

        mkrange = []
        for i, (mkey, iline) in enumerate(mkidx):
            if i==len(mkidx)-1:
                lend = l1
            else:
                lend = mkidx[i+1][1]

            mkrange.append((mkey, iline, lend))

        return mkrange

    @staticmethod
    def stripComment(s):
        """Strip a trailing comment (p_comment regex) and surrounding whitespace."""
        return p_comment.subn('', s)[0].strip()

    def proc__DefBitFieldType(self, f:ASLFileContentProvider, l0, l1):
        '''Parse a __DefBitFieldType section into an ASLBitFieldType.

        Enum values are written in hexadecimal; a missing value continues
        from the previous one (+1).
        '''
        res = p_def_bitfieldtype.match(f.P[l0])
        if res is None:
            f.emitParseError(l0, 'Invalid bit field type definition!')

        name = res.group('Name')
        bw = int(res.group('BitWidth'))
        metadata = {'def': (f.getFilename(), l0+1)}  # note l0 starts from 0, thus +1 for better editor view

        bft = ASLBitFieldType(name, bw, metadata)

        prev_val = -1
        for iline in range(l0+1, l1):
            pline = f.P[iline]
            if len(pline) == 0:
                continue

            rl = p_enum.match(pline)
            if rl is None:
                f.emitParseError(iline, 'Invalid  bit field enum definition!')

            e_name = rl.group('Name')
            e_value = rl.group('Value')
            if e_value is None or len(e_value)==0:
                e_value = prev_val + 1
            else:
                e_value = int(e_value, 16)  # enum values are hexadecimal
            prev_val = e_value
            e = bft.push(e_name, e_value, return_exception=True)
            if e is not None:
                f.emitParseError(iline, str(e))

        return name, bft

    def proc__DefImmBitFieldType(self, f:ASLFileContentProvider, l0, l1):
        ''' ImmBitFieldType cannot enumerate all members, thus need special treatment.
            TODO: Currently the treatment is hardcoded in parser, not in input ASL files.
        '''
        res = p_def_bitfieldtype.match(f.P[l0])
        if res is None:
            f.emitParseError(l0, 'Invalid DefImmBitFieldType!')

        kname = res.group('Name')
        bw = res.group('BitWidth')

        if ':' in bw: # UImm/SImm defined in groups
            # 'Name(i0:i1)' expands to NameI for every bitwidth i in [i0, i1)
            bws = bw.split(':')
            i0, i1 = int(bws[0]), int(bws[1])

            knames = []
            kdatas = []
            for i in range(i0, i1):
                k = f'{kname}{i}'
                knames.append(k)
                # CAUTION: metadata should be deepcopied here, otherwise all may share the same dict obj
                bft = ASLBitFieldType(k, i, metadata = {'def':(f.getFilename(), l0+1)}, vtype=kname)
                kdatas.append(bft)
            return knames, kdatas
        else:
            bw = int(bw)
            bft = ASLBitFieldType(kname, bw, metadata = {'def':(f.getFilename(), l0+1)}, vtype=kname)
            return kname, bft

    def proc__DefPropType(self, f:ASLFileContentProvider, l0, l1):
        ''' PropType is pretty like BitFieldType, but not for encoding.'''
        name, bft = self.proc__DefBitFieldType(f, l0, l1)
        bft.mVType = 'Prop'
        return name, bft

    def __proc__DefOpGroup(self, f:ASLFileContentProvider, l0, l1, gtype='Group'):
        """Shared implementation for __DefGroup/__DefOptype/__DefOpcode.

        Parses the group header (name + optional inherit list), then
        dispatches each member section to its callback; results land in
        the group's mData keyed by member name.
        """
        res = p_group.match(f.P[l0])
        if res is None:
            f.emitParseError(l0, f'Error when parsing Def{gtype}!')

        gname = res.group('Name')
        # metadata =
        inherit_s = res.group('Inherit')
        if inherit_s is None or inherit_s=='':
            inherit = []
        else:
            inherit = inherit_s.split(',')

        metadata = {'def':(f.getFilename(), l0+1), 'inherit':inherit}
        g = ASLGroup(gname, gtype, {}, metadata)

        mklines = self.parseMembers(f, l0+1, l1)
        if mklines is None: # TODO: no contents? Emit error?
            return gname, g

        for mkey, m_l0, m_l1 in mklines:
            if mkey in g.mData: #
                f.emitParseError(m_l0, f'Redefined member "{mkey}" in ASLGroup "{gname}"!')
            if m_l0 == m_l1: # empty member, only one line
                # NOTE(review): parseMembers always returns lend > iline, so
                # this condition can never hold; an empty member actually has
                # m_l0+1 == m_l1 — confirm intended behavior.
                continue

            mname, mdata = self.mCallbacks[mkey](f, m_l0+1, m_l1, name=gname)
            if mname is None: # No content ? May just return empty data?
                continue

            g.mData[mname] = mdata
        return gname, g

    def proc__DefGroup(self, f:ASLFileContentProvider, l0, l1):
        return self.__proc__DefOpGroup(f, l0, l1, 'Group')

    def proc__DefOptype(self, f:ASLFileContentProvider, l0, l1):
        return self.__proc__DefOpGroup(f, l0, l1, 'Optype')

    def proc__DefOpcode(self, f:ASLFileContentProvider, l0, l1):
        return self.__proc__DefOpGroup(f, l0, l1, 'Opcode')

    # def proc__Format(self, f:ASLFileContentProvider, l0, l1, *, name=''):
    #     return '__Format', doKeywordsEscape(''.join(f.L[l0:l1]))

    def proc__Encoding(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        '''Parse an __Encoding member: one "ranges ; type var [= val]" line per field.'''
        enc = ASLEncoding(name, metadata={'def':(f.getFilename(), l0)}) # no +1 because def line is removed, only content line here
        for iline, pline in f.enumPLines(l0, l1):
            if len(pline)==0:
                continue

            res = p_field.match(pline)
            if res is None:
                f.emitParseError(iline, 'Invalid encoding line!')

            frange_s = res.group('FRange')
            ftype = res.group('FType')
            fvar = res.group('FVar')
            fval = res.group('FVal')

            # ranges are 'v0,vn' pairs separated by ';'
            frange = []
            for v_frange in frange_s.split(';'):
                vs = tuple(v_frange.split(','))
                v0, vn = int(vs[0]), int(vs[1])
                frange.append((v0, vn))

            bf = ASLBitField(frange, ftype, fvar, fval, src=name)

            e = enc.push(bf, return_exception=True)
            if e is not None:
                f.emitParseError(iline, str(e))

        return '__Encoding', enc

    def proc__Property(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Store the raw __Property text (keyword-escaped)."""
        return '__Property', doKeywordsEscape(''.join(f.L[l0:l1]))

    def proc__OperandInfo(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Parse an __OperandInfo member into an ASLOperandInfo.

        Recognized attribute lines are split into list/dict entries; anything
        unrecognized is preserved verbatim as markdown plain text.
        """
        info = ASLOperandInfo(name, metadata={'def':(f.getFilename(), l0)}) # no +1 because def line is removed, only content line here
        mdata = {}
        plaintext = ''
        for iline, pline in f.enumPLines(l0, l1):
            # pline = re.subn(r'\s+', '', pline)[0]
            if len(pline)==0:
                if len(plaintext) > 0:
                    plaintext += '\n'    ## do not omit space lines in markdown plaintext
                continue

            res = p_operandinfo_attr.match(pline)
            if res is None:
                ## Instead of throwing an error, store unknown content as plain text
                ## Get the original line content (not processed line!)
                plaintext += f.getLine(iline)
                continue

            attr = res.group('Attr')
            obj = res.group('Obj')
            val = res.group('Value')

            if attr in OPERANDINFO_LISTS:
                if val is not None:
                    f.emitParseError(iline, f'OperandInfo attr {attr} should not have value set!')
                mdata[attr] = obj
            elif attr in OPERANDINFO_DICTS:
                if val is None:
                    f.emitParseError(iline, f'OperandInfo attr {attr} should set value "=..." !')
                elif obj is None or len(obj)==0:
                    f.emitParseError(iline, f'OperandInfo attr {attr} should set object "{attr}<obj> = ..." !')

                val = val.lstrip(' =')
                if attr in mdata:
                    mdata[attr][obj] = val
                else:
                    mdata[attr] = {obj:val}

        # Store plain text content if any
        if len(plaintext) > 0:
            mdata[OPERANDINFO_PLAINTEXT] = plaintext

        info.mData = mdata
        return '__OperandInfo', info #doKeywordsEscape(''.join(f.L[l0:l1]))

    def proc__Exception(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Parse an __Exception member into an ASLException.

        EncodingError entries require a condition expression;
        RuntimeException entries must not carry one.
        """
        excep = ASLException(name, metadata={'def':(f.getFilename(), l0)}) # no +1 because def line is removed, only content line here

        for iline, pline in f.enumPLines(l0, l1):
            # pline = re.subn(r'\s+', '', pline)[0]
            if len(pline)==0:
                continue

            res = p_exception_line.match(pline)
            if res is None:
                f.emitParseError(iline, 'Invalid exception line!')

            etype = res.group('ExcepType')
            ename = res.group('ExcepName')
            reason = res.group('Reason').strip('"') if res.group('Reason') is not None else ''
            cond_expr = res.group('CondExpr')
            v = (name, reason, cond_expr)

            if etype == 'EncodingError':
                if cond_expr is None:
                    f.emitParseError(iline, 'No condition expression defined for EncodingError!')
                d = excep.mData['EncodingError']
                if ename in d:
                    if v not in d[ename]:
                        d[ename].append(v)
                else:
                    d[ename] = [v]
            else:
                if cond_expr is not None:
                    f.emitParseError(iline, 'RuntimeException should have no CondExpr set!')

                s = excep.mData['RuntimeException']
                sv = (name, ename)
                if sv not in s:
                    s.append(sv)

        return '__Exception', excep

    def proc__Examples(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Store the raw __Examples text (keyword-escaped)."""
        return '__Examples', doKeywordsEscape(''.join(f.L[l0:l1]))

    def proc__Semantics(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Store the raw __Semantics text (keyword-escaped)."""
        return '__Semantics', doKeywordsEscape(''.join(f.L[l0:l1]))

    def proc__Description(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Store the raw __Description text (keyword-escaped)."""
        return '__Description', doKeywordsEscape(''.join(f.L[l0:l1]))

    def proc__Simulation(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Store the raw __Simulation text (keyword-escaped)."""
        return '__Simulation', doKeywordsEscape(''.join(f.L[l0:l1]))

    def proc__Syntax(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Store the raw __Syntax text (keyword-escaped)."""
        return '__Syntax', doKeywordsEscape(''.join(f.L[l0:l1]))

    def proc__ModifierInfo(self, f:ASLFileContentProvider, l0, l1, *, name=''):
        """Store the raw __ModifierInfo text (keyword-escaped)."""
        return '__ModifierInfo', doKeywordsEscape(''.join(f.L[l0:l1]))

class ASLPostProcessError(Exception):
    """Raised when post-processing of parsed ASL data fails
    (e.g. unknown/cyclic inheritance, opcode/optype conflicts)."""
    pass

class ASLData:
    """Aggregated, post-processed view of all parsed ASL definitions.

    Collects parser output into bitfield types (mTypes) and op groups
    (mOpGroups), keeps SHA256 provenance of every input file, and
    implements the export backends: encoding pictures, markdown docs,
    xlsx encoding sheet, json/pickle dumps.
    """

    def __init__(self):
        self.mData = {}        # raw parser result: name -> parsed object
        self.mParser = ASLParser()

        self.mOpGroups = {}    # name -> ASLGroup (split from mData)
        self.mTypes = {}       # name -> ASLBitFieldType (split from mData)

        self.mSHAObj = hashlib.sha256()  # rolling hash over all parsed input files
        self.mFileList = []    # [(fname, sha256hex), ...] build provenance
        self.mBuildInfo = {}   # 'BuildTime' / 'InputSHA256', filled by generateHashInfo()

    def parseFiles(self, flist):
        """Parse a file name, a glob pattern, or a list of either.

        Parser data is synced once at the end.
        Raises TypeError for any other input type.
        """
        if isinstance(flist, list):
            for fname in flist:
                if '*' in fname or '?' in fname:
                    self.parseFilePattern(fname, sync_data=False)
                else:
                    self.parseFile(fname, sync_data=False)
        elif isinstance(flist, str):
            if '*' in flist or '?' in flist:
                self.parseFilePattern(flist, sync_data=False)
            else:
                self.parseFile(flist, sync_data=False)
        else:
            raise TypeError(f'Unknown input flist type : {type(flist)}!')

        self.syncParserData()

    def parseFilePattern(self, fpattern, *, sync_data=True, do_sort=True):
        """Parse every file matching a glob pattern.

        Sorted by default so the result (and the rolling input hash)
        does not depend on filesystem enumeration order.
        """
        flist = [fname for fname in glob.iglob(fpattern)]
        if do_sort:
            flist.sort()

        for fname in flist:
            self.parseFile(fname, sync_data=False)

        if sync_data:
            self.syncParserData()

    def parseFile(self, fname, *, sync_data=True):
        """Parse a single file and record its SHA256 for provenance."""
        self.mParser.parse(fname)

        # generate hash info: once standalone for the file list,
        # once folded into the rolling whole-input hash object
        d = getFileSHA256(fname)
        getFileSHA256(fname, self.mSHAObj)
        self.mFileList.append((fname, d))

        if sync_data:
            self.syncParserData()

    def generateHashInfo(self):
        """Append this script's own hash to the file list and record
        build time plus the combined input SHA256 in mBuildInfo."""
        fscript = os.path.abspath(__file__)
        fpath = pathlib.Path(fscript)
        hd = getFileSHA256(fscript)
        # store the script path relative to its grandparent directory
        sname = str(fpath.relative_to(fpath.parent.parent))
        self.mFileList.append((sname, hd))

        t = time.strftime('%Y.%m.%d %H:%M:%S')

        self.mBuildInfo['BuildTime'] = t
        self.mBuildInfo['InputSHA256'] = self.mSHAObj.hexdigest()

    def syncParserData(self):
        """Split the flat parser dict into bitfield types vs op groups."""
        self.mData = self.mParser.mData

        # split by object class; anything else is silently ignored
        for k, v in self.mData.items():
            if isinstance(v, ASLBitFieldType):
                self.mTypes[k] = v
            elif isinstance(v, ASLGroup):
                self.mOpGroups[k] = v
            else: #
                pass

    def doPostProcess(self, sav_dir:str):
        """Validate and finalize the parsed data.

        Checks bit widths, builds and validates the group-inheritance
        DAG (rejecting cycles), merges inherited __Encoding /
        __OperandInfo / __Exception sections down the graph, and checks
        optype/opcode consistency.

        Raises ASLPostProcessError on any consistency violation.
        """
        # check bitfield bitwidth matching the BitFieldType
        checkDirExistence(sav_dir)

        pic_dir = os.path.join(sav_dir, 'pics')
        checkDirExistence(pic_dir)

        for k, v in self.mOpGroups.items():
            if '__Encoding' in v.mData:
                v.mData['__Encoding'].checkBitWidth(self.mTypes)

        # build group inheritance network (edge: parent -> child)
        self.mOpGraph = nx.DiGraph(name='OpGroups')
        self.mOpGraph.add_nodes_from(self.mOpGroups.keys())

        for op, opv in self.mOpGroups.items():
            if opv.mMetadata is not None and 'inherit' in opv.mMetadata:
                inherit_oplist = opv.mMetadata['inherit']
                for inh_op in inherit_oplist:
                    if inh_op not in self.mOpGroups:
                        raise ASLPostProcessError(f'Unknown inherit group {inh_op} for {op} : {opv.getDef()}!')

                    self.mOpGraph.add_edge(inh_op, op)

        # check cyclic inheritance
        try:
            c_edges = nx.find_cycle(self.mOpGraph)
            emsg = 'Cyclic edges: ' + str(c_edges) + '\n'
            nset = set()
            for ea, eb in c_edges:
                nset.add(ea)
                nset.add(eb)

            for n in nset:
                emsg += f'  {n} defined in {self.mOpGroups[n].getDef()};\n'
            raise ASLPostProcessError(f'Cyclic inheritance found!\n' + emsg)

        except nx.NetworkXNoCycle:
            # no cycle is the good case
            pass

        #
        print('Saving dot graph...')
        # NOTE(review): 'A' is unused (the layout call is commented out);
        # kept to preserve existing behavior — confirm before removing
        A = nx.nx_agraph.to_agraph(self.mOpGraph)
        # A.layout()
        nx.drawing.nx_agraph.write_dot(self.mOpGraph, os.path.join(sav_dir, 'pics', 'opgraph.dot'))

        # merge encoding/operandinfo/exception fields from parents into
        # children, in topological order so grandparents propagate
        for node in nx.topological_sort(self.mOpGraph):
            for e in self.mOpGraph.in_edges(node):
                pnode, _ = e
                # print(f'{pnode} => {node}')

                # merging encoding
                if '__Encoding' in self.mOpGroups[pnode].mData:
                    if '__Encoding' not in self.mOpGroups[node].mData:
                        self.mOpGroups[node].mData['__Encoding'] = ASLEncoding(node, metadata={'def':('None', 0)})

                    print(f'Merging __Encoding {pnode} into {node}...')
                    self.mOpGroups[node].mData['__Encoding'].merge(self.mOpGroups[pnode].mData['__Encoding'])

                # merging operand info
                if '__OperandInfo' in self.mOpGroups[pnode].mData:
                    if '__OperandInfo' not in self.mOpGroups[node].mData:
                        self.mOpGroups[node].mData['__OperandInfo'] = ASLOperandInfo(node, metadata={'def':('None',0)})

                    print(f'Merging __OperandInfo {pnode} into {node}...')
                    self.mOpGroups[node].mData['__OperandInfo'].merge(self.mOpGroups[pnode].mData['__OperandInfo'])

                # merging exception
                if '__Exception' in self.mOpGroups[pnode].mData:
                    if '__Exception' not in self.mOpGroups[node].mData:
                        self.mOpGroups[node].mData['__Exception'] = ASLException(node, metadata={'def':('None',0)})

                    print(f'Merging __Exception {pnode} into {node}...')
                    self.mOpGroups[node].mData['__Exception'].merge(self.mOpGroups[pnode].mData['__Exception'])

        print("Checking optypes...")
        # every value of the 'Optype' enum must map to a defined Optype group
        optypes_kv = self.mTypes['Optype'].mKV
        for optype, v in optypes_kv.items():
            if optype not in self.mOpGroups:
                raise ASLPostProcessError(f"WARNING!!! Optype {optype}={v} not defined!\n")
            elif self.mOpGroups[optype].mType != 'Optype':
                raise ASLPostProcessError(f"WARNING!!! Optype {optype}={v} defined with wrong type! {self.mOpGroups[optype].getDef()}")
            else:
                pass

        print("Checking opcodes...")
        optype_dict = {}   # optype value -> group name, for uniqueness check
        opcode_dict = {}   # combined opcode value -> group name, for uniqueness check
        for op, opv in self.mOpGroups.items():
            if '__Encoding' in opv.mData:
                enc = opv.mData['__Encoding']
                enc.fillValues(self.mTypes)
                enc.sortFields()

                if opv.mType=='Opcode':
                    enc.checkAssociatedFields()
                    if 'optype' not in enc.mFieldConstants:
                        raise ASLPostProcessError(f"optype not set for Opcode {op}! {opv.getDef()}")
                    if 'stype' not in enc.mFieldConstants:
                        raise ASLPostProcessError(f"stype not set for Opcode {op}! {opv.getDef()}")

                    # combined key: optype in low bits, stype shifted by 8
                    opcode = enc.mFieldConstants['optype'] + (enc.mFieldConstants['stype']<<8)
                    if opcode in opcode_dict:
                        cop = opcode_dict[opcode]
                        raise ASLPostProcessError(f"Opcode contradiction ! {cop} vs {op} : {opv.getDef()}")
                    else:
                        opcode_dict[opcode] = op

                    if '__OperandInfo' not in opv.mData:
                        raise ASLPostProcessError(f"__OperandInfo not set for Opcode {op}! {opv.getDef()}")

                    opv.mData['__OperandInfo'].parseInfo(enc, self.mTypes)
                    opv.mData['__OperandInfo'].checkAttr(enc)
                    opv.mData['__OperandInfo'].checkModiOrder(enc, opv, self.mTypes)

                    if '__Exception' in opv.mData:
                        opv.mData['__Exception'].parseData(enc, self.mOpGroups, self.mTypes)

                elif opv.mType=='Optype':
                    if 'optype' not in enc.mFieldConstants:
                        raise ASLPostProcessError(f"optype not set for Optype {op}! {opv.getDef()}")
                    opk = enc.mFieldConstants['optype']
                    if opk in optype_dict:
                        cop = optype_dict[opk]
                        raise ASLPostProcessError(f"Optype contradiction ! {cop} vs {op} : {opv.getDef()}")
                    else:
                        optype_dict[opk] = op

                    # optype without any opcode defined
                    if self.mOpGraph.out_degree(op)==0:
                        raise ASLPostProcessError(f"Optype without opcodes defined! {op} : {opv.getDef()}")

    def generateEncodingPics(self, sav_dir:str, rebuild:bool=False):
        """Draw one encoding picture per group under sav_dir/pics.

        Skips pictures that are newer than their defining .asl file
        unless rebuild is set.
        """
        pic_dir = os.path.join(sav_dir, 'pics')
        if not os.path.isdir(pic_dir):
            os.makedirs(pic_dir)

        for op, opv in self.mOpGroups.items():
            if '__Encoding' in opv.mData:
                ## check if pic is already drawn
                pic_fname = os.path.join(sav_dir, 'pics', op+'.'+IMAGE_FORMAT)
                if not rebuild and os.path.exists(pic_fname):
                    ## compare time stamp of definition file (*.asl) and existing picture file
                    def_fname = opv.mMetadata['def'][0]
                    if os.path.getmtime(def_fname) < os.path.getmtime(pic_fname):
                        ## the picture file is already latest, no need to draw it again
                        continue
                enc = opv.mData['__Encoding']
                enc.draw(pic_fname, title=f'{opv.mType} : {opv.mName}')

    def generateDocs(self):
        """Not implemented yet."""
        pass

    def generateOpModis(self, fname:str):
        """Write the enumerated op modifiers of every Opcode group to fname."""
        with open(fname, 'w+', encoding='utf-8') as fout:
            for op, opv in self.mOpGroups.items():
                if opv.mType=='Opcode':
                    enc = opv.mData['__Encoding']
                    opr_info = opv.mData['__OperandInfo']

                    fout.write('#### ' + opv.mName + '\n')
                    for opm in enc.enumerateOpModi(opr_info, self.mTypes):
                        fout.write('  ' + opm + '\n')
                    fout.write('\n')

    def generateOpFuncs(self, fname:str):
        """Enumerate func encodings of every Opcode, write them to fname,
        and return (and store in mOpFuncs) the value->tag mapping.

        Conflicting tags for the same value are reported in the output
        file rather than raised.
        """
        repos = {}   # func value -> func tag
        with open(fname, 'w+', encoding='utf-8') as fout:
            for op, opv in self.mOpGroups.items():
                if opv.mType=='Opcode':
                    enc = opv.mData['__Encoding']
                    opr_info = opv.mData['__OperandInfo']
                    # NOTE(review): assumes every Opcode carries an
                    # __Exception section (own or merged) — confirm
                    # upstream guarantees this, otherwise KeyError
                    excep = opv.mData['__Exception']

                    # fout.write('#### ' + opv.mName + '\n')
                    for fv, ftag in enc.enumerateFunc(opr_info, excep, self.mTypes):
                        if fv is None:
                            fout.write(f'Invalid func : {ftag}\n')
                        elif fv not in repos:
                            fout.write(f'{fv:#034x} : {ftag}\n')
                            repos[fv] = ftag
                        else:
                            if ftag != repos[fv]:
                                fout.write(f'CAUTION!!! Conflict {ftag} vs {repos[fv]} for {fv:#034x}!\n')
        self.mOpFuncs = repos
        return repos

    @staticmethod
    def extractMeta(content: str) -> dict:
        """Extract metadata from markdown content.

        Metadata lines are HTML comments of the form
        ``<!-- KEY: value -->`` at the top of the file, terminated by an
        optional ``<!-- SEPARATOR -->`` line.
        Returns (metadata_dict, content_without_metadata).
        """
        metadata = {}
        lines = content.splitlines(keepends=True)

        # hoisted out of the loop — same pattern for every line
        meta_pat = re.compile(r'^<!--\s*([^:]+):\s*(.*?)\s*-->\s*$')

        i = 0
        body_beg_line_id = 0
        while i < len(lines):
            line = lines[i]
            match = meta_pat.match(line.strip())
            if match:
                key = match.group(1).strip()
                value = match.group(2).strip()
                metadata[key] = value
                # body starts after the last metadata line seen so far
                body_beg_line_id = i + 1
            elif '<!-- SEPARATOR -->' in line:
                ## separator between metadata and content
                body_beg_line_id = i + 1
                break
            i += 1

        body_lines = lines[body_beg_line_id:]
        ## over
        return metadata, ''.join(body_lines)

    @staticmethod
    def embedMeta(metadata: dict, content: str) -> str:
        """Embed metadata into markdown content.

        metadata must contain 'HASH' and 'TIME'; 'LATEST_VERSION' is
        optional (defaults to empty); ASL_HASH/ASL_TIME are emitted only
        when present (they always travel together).
        """
        md_hash = metadata['HASH']
        md_time = metadata['TIME']
        ## 'LATEST_VERSION' field may be missing
        md_tag = metadata['LATEST_VERSION'] if 'LATEST_VERSION' in metadata else ''
        metadata_lines = [f'<!-- HASH: {md_hash} -->',
                          f'<!-- TIME: {md_time} -->']
        if 'ASL_HASH' in metadata:
            md_asl_hash = metadata['ASL_HASH']
            md_asl_time = metadata['ASL_TIME']
            metadata_lines.append(f'<!-- ASL_HASH: {md_asl_hash} -->')
            metadata_lines.append(f'<!-- ASL_TIME: {md_asl_time} -->')
        metadata_lines.append(f'<!-- LATEST_VERSION: {md_tag} -->')
        metadata_lines.append('<!-- SEPARATOR -->\n\n')
        return '\n\n'.join(metadata_lines) + content

    def _writeMarkdownFile(self, md_fname:str, md_content:str, tag, *, ensure_version:bool=False):
        """Embed refreshed metadata into md_content and write md_fname.

        Shared by every file generateMarkdown() emits:
        - metadata of an existing (history) file is preserved,
        - HASH/TIME and the ASL_HASH/ASL_TIME build info are refreshed,
        - LATEST_VERSION is set from tag when given; with ensure_version
          a placeholder version is guaranteed to exist.
        """
        ## generate time stamp, hash and tag
        md_timestamp = time.strftime('%Y.%m.%d %H:%M:%S')
        md_hash = get_content_sha256(md_content)
        md_tag = tag if tag is not None else ''

        ## load history file metadata (if the file already exists)
        md_metadata = {}
        if os.path.isfile(md_fname):
            with open(md_fname, 'r', encoding='utf-8') as f:
                md_metadata, _ = ASLData.extractMeta(f.read())

        ## add asl definition file hash and time info
        md_metadata['ASL_HASH'] = self.mBuildInfo['InputSHA256']
        md_metadata['ASL_TIME'] = self.mBuildInfo['BuildTime']
        ## update timestamp and hash in metadata
        md_metadata['TIME'] = md_timestamp
        md_metadata['HASH'] = md_hash
        ## update tag in metadata if necessary
        if len(md_tag) > 0:
            md_metadata['LATEST_VERSION'] = md_tag
        elif ensure_version and 'LATEST_VERSION' not in md_metadata:
            ## if the history file has no version at all we must make
            ## sure it has an (empty) version at least
            ## (bug fix: the key used to be checked in lowercase, which
            ## never matched and clobbered any existing version)
            md_metadata['LATEST_VERSION'] = ' '

        ## embed metadata and write the .md file
        md_content = ASLData.embedMeta(md_metadata, md_content)
        with open(md_fname, 'w', encoding='utf-8') as fout:
            fout.write(md_content)

    def generateMarkdown(self, md_dir:str, tag: str = None, rebuild:bool=False):
        """Generate the markdown doc tree under md_dir.

        Emits BitFieldTypes.md, op_encodings.md, one file per op group
        and opgroups.md. Per-group files newer than their defining .asl
        file are kept unless rebuild is set.
        """
        # create md dir if not exist
        checkDirExistence(md_dir)

        def getOpRef(op, gtype=None, opf=None):
            # relative link target for an op group (optionally a section anchor)
            if gtype is None:
                opv = self.mOpGroups[op]
                gtype = opv.mType
            s = f'{gtype}_{op}.md'
            if opf is not None:
                s += f'#{opf}'
            return s

        def getBitFieldTypeRef(bfname):
            return f'BitFieldTypes.md#{bfname}'

        def cvtRefLinks(text):
            # replace ref-link markers with markdown links to types/groups
            ptext = text
            for m in p_reflink.finditer(text):
                target = m.group('Target')
                mstr = m.group()

                if target in self.mTypes:
                    rep = f'[{target}](BitFieldTypes.md#{target})'
                elif target in self.mOpGroups:
                    gtype = self.mOpGroups[target].mType
                    ftag = f'{gtype}_{target}'
                    rep = f'[{target}]({ftag}.md)'
                else:
                    print(f'WARNING!!! RefLink target {mstr} not recognized!')
                    continue

                print(f'Converting ref link {mstr} to {rep}...')
                ptext = ptext.replace(mstr, rep)
            return ptext

        ## ---- BitFieldTypes.md : one section per bitfield type ----
        md_content_chars = ['# BitFieldTypes\n\n']
        for bfname, bft in self.mTypes.items():
            if bft.mVType in {'UImm', 'SImm'}: # TODO
                continue
            md_content_chars.append(f'<a id="{bfname}"></a>\n\n') # extra line needed
            md_content_chars.append(f'## {bfname}\n\n')
            md_content_chars.append(f'- `Bitwidth = {bft.mBitWidth}`\n')
            md_content_chars.append(f'- `Metadata = {bft.mMetadata}`\n')
            md_content_chars.append(f'- `VType = {bft.mVType}`\n')
            md_content_chars.append(f'- `MaxV = {bft.mMaxV:#x}`\n')
            if bft.mVType in {'Enum', 'Prop'}:
                md_content_chars.append('- `Enums:`\n\n')
                md_content_chars.append('```csharp\n')
                md_content_chars.append(f'__DefBitFieldType {bfname}<{bft.mBitWidth}>\n')
                # pad enum names to the longest one for aligned output
                slen = max([len(k) for k in bft.mKV])
                for k, v in bft.mKV.items():
                    ks = ('{:' + str(slen) + '}').format(k)
                    md_content_chars.append(f'  {ks} = {v:#x};\n')
                md_content_chars.append('```\n\n')

        ## the two immediate pseudo-types get fixed sections
        md_content_chars.append('<a id="UImm"></a>\n\n') # extra line needed
        md_content_chars.append('## UImm\n\n')
        md_content_chars.append('- `Bitwidth = [1:128]`\n')
        md_content_chars.append('- `VType = UImm`\n')
        md_content_chars.append('<a id="SImm"></a>\n\n') # extra line needed
        md_content_chars.append('## SImm\n\n')
        md_content_chars.append('- `Bitwidth = [2:128]`\n')
        md_content_chars.append('- `VType = SImm`\n')
        ## BitFieldTypes.md must always carry a version field
        self._writeMarkdownFile(os.path.join(md_dir, 'BitFieldTypes.md'),
                                ''.join(md_content_chars), tag, ensure_version=True)

        ## ---- op_encodings.md : group list with encoding pictures ----
        md_content_chars = ['# Group list\n\n']
        for op, opv in self.mOpGroups.items():
            gtype = opv.mType
            ftag = f'{gtype}_{op}'
            md_content_chars.append(f'## [{op}]({ftag}.md)\n\n')
            md_content_chars.append(f'<img src="../pics/{op}.{IMAGE_FORMAT}" alt="{ftag}" style="width:100%; height:auto;">\n\n')
        self._writeMarkdownFile(os.path.join(md_dir, 'op_encodings.md'),
                                ''.join(md_content_chars), tag)

        ## ---- one markdown file per op group + opgroups.md index ----
        opg_md_content_chars = ['# Group list\n\n']

        for op, opv in self.mOpGroups.items():
            gtype = opv.mType
            ftag = f'{gtype}_{op}'
            ## a new op group
            opg_md_content_chars.append(f'- {gtype} : [{op}]({ftag}.md)\n\n')

            md_fname = os.path.join(md_dir, ftag + '.md')
            ## check if md file is already generated
            if not rebuild and os.path.exists(md_fname):
                ## compare time stamp of definition file (*.asl) and existing md file
                def_fname = opv.mMetadata['def'][0]
                if os.path.getmtime(def_fname) < os.path.getmtime(md_fname):
                    ## md file is already latest, no need to generate it again
                    continue

            print(f'Writing {md_fname}...')
            md_content_chars = [f'# {gtype}_{op}\n\n']
            # basic info
            for opf in opv.mData:
                md_content_chars.append(f'<a id="{ftag}{opf}"></a>\n\n') # extra line needed
                md_content_chars.append(f'## {opf}\n\n')
                # encoding
                if opf == '__Encoding':
                    md_content_chars.append(f'<img src="../pics/{op}.{IMAGE_FORMAT}" alt="{ftag}" style="width:100%; height:auto;">\n\n')
                    enc = opv.mData['__Encoding']
                    for bf, bfv in enc.mFields.items():
                        ts  = f'- field `{bfv.mRange}` : '
                        ts += f'[{bfv.mType}]({getBitFieldTypeRef(bfv.mType)}) '
                        ts += f'`{bfv.mVar} {bfv.mVal}`'
                        # double quotes inside the f-string: quote reuse
                        # is a SyntaxError before Python 3.12
                        ts += f', Src=[{bfv.mSrc}]({getOpRef(bfv.mSrc, None, "__Encoding")})'
                        ts += '\n'
                        md_content_chars.append(ts)
                elif opf == '__OperandInfo':
                    md_text = ''
                    info = opv.mData['__OperandInfo']
                    if gtype == 'Opcode':
                        ## for Opcode nodes, omit markdown text
                        ## which has already been displayed in parent Optype node
                        md_content_chars.append('```\n')
                        for k, v in info.mData.items():
                            if k in OPERANDINFO_LISTS:
                                md_content_chars.append(f'{k} : [{v}];\n')
                            elif k in OPERANDINFO_DICTS:
                                md_content_chars.append(f'{k} :\n')
                                for k2, v2 in v.items():
                                    md_content_chars.append(f'  <{k2}> = {v2};\n')
                        md_content_chars.append('```\n\n')
                    else:
                        ## for Optype nodes, only keep markdown text
                        if OPERANDINFO_PLAINTEXT in info.mData:
                            md_text = info.mData[OPERANDINFO_PLAINTEXT]
                    ## add plain text (if any) to the end of this node, in markdown format
                    if len(md_text):
                        ## automatically separate text lines with table/list
                        md_text = MarkdownTextRegularizer.insertBlankLines(md_text)
                        md_content_chars.append(f'{md_text}\n')
                elif opf == '__Exception':
                    excep = opv.mData['__Exception']
                    md_content_chars.append('```\n')
                    for ename, elist in excep.mData['EncodingError'].items():
                        # print(elist)
                        for esrc, reason, expr in elist:
                            md_content_chars.append(f'  EncodingError<{ename}, {reason}> = {expr}; // Src = {esrc}\n')
                    for esrc, ename in excep.mData['RuntimeException']:
                        md_content_chars.append(f'  RuntimeException<{ename}>; // Src={esrc} \n')
                    md_content_chars.append('```\n')
                else:
                    ## these are sections only containing plain text
                    ## automatically separate text lines with table/list
                    md_text = MarkdownTextRegularizer.insertBlankLines(opv.mData[opf])
                    # TODO: only cvt ref links for text sections
                    md_content_chars.append(cvtRefLinks(md_text) + '\n\n')
            if gtype == 'Group':
                md_content_chars.append("## Predecessors\n\n")
                for vop in self.mOpGraph.predecessors(op):
                    md_content_chars.append(f"- [{vop}]({getOpRef(vop)})\n")
                md_content_chars.append("## Successors\n\n")
                for vop in self.mOpGraph.successors(op):
                    md_content_chars.append(f"- [{vop}]({getOpRef(vop)})\n")
            elif gtype == 'Optype':
                md_content_chars.append("## Predecessors\n\n")
                for vop in self.mOpGraph.predecessors(op):
                    md_content_chars.append(f"- [{vop}]({getOpRef(vop)})\n")
                md_content_chars.append("## Opcode list\n\n")
                for vop in self.mOpGraph.successors(op):
                    md_content_chars.append(f"- [{vop}]({getOpRef(vop)})\n")
                    md_content_chars.append(f'<img src="../pics/{vop}.{IMAGE_FORMAT}" alt="{vop}" style="width:100%; height:auto;">\n\n')
            elif gtype == 'Opcode':
                md_content_chars.append("## Predecessors\n\n")
                for vop in self.mOpGraph.predecessors(op):
                    md_content_chars.append(f"- [{vop}]({getOpRef(vop)})\n")

            ## write the per-group .md file with refreshed metadata
            self._writeMarkdownFile(md_fname, ''.join(md_content_chars), tag)

        ## TODO(review): this heading duplicates the one already at the
        ## top of opg_md_content_chars — kept to preserve existing output
        opg_md_content_chars.append('# Group list\n\n')
        opg_md_content_chars.append(f'<img src="../pics/opgraph.{IMAGE_FORMAT}" alt="opgraph">\n\n')
        self._writeMarkdownFile(os.path.join(md_dir, 'opgroups.md'),
                                ''.join(opg_md_content_chars), tag)

    def saveEncodingAsXLSX(self, xlsx_file:str):
        """Dump every Opcode encoding as one column of an xlsx sheet,
        one row per instruction bit (128 rows), with field names merged
        over their bit ranges and unset bits shaded."""
        workbook = xlsxwriter.Workbook(xlsx_file)
        worksheet = workbook.add_worksheet()
        worksheet.freeze_panes(2, 1)

        vfmt = workbook.add_format({"bold": 1,"border": 1,"align": "center","valign": "vcenter","font_name":"Consolas"})
        ufmt = workbook.add_format({"bold": 1,"left": 1, "right":1, "bottom":0, "top":0, "align": "center","valign": "vcenter","fg_color": "#AAAAAA","font_name":"Consolas"})

        HEADER_ROWS=2
        # first column: bit index 0..127
        for i in range(128):
            worksheet.write(i+HEADER_ROWS, 0, i, vfmt)

        icol = 1
        for op, opv in self.mOpGroups.items():
            gtype = opv.mType
            if gtype!='Opcode':
                continue

            icol += 1
            worksheet.write(1, icol, op, vfmt)
            worksheet.set_column(icol, icol, 14)

            enc = opv.mData['__Encoding']
            for bf, bfv in enc.mFields.items():
                for v0, vn in bfv.mRange:
                    if vn>1:
                        # multi-bit field: merge the cell over its bit span
                        worksheet.merge_range(v0+HEADER_ROWS, icol, v0+vn-1+HEADER_ROWS, icol, bfv.mVar, vfmt)
                    else:
                        worksheet.write(v0+HEADER_ROWS, icol, bfv.mVar, vfmt)

            # shade bits not covered by any field
            for i in range(128):
                if enc.mSetBits & (1<<i) == 0:
                    worksheet.write(HEADER_ROWS+i, icol, '', ufmt)

        workbook.close()

    def saveAsJson(self, json_file:str):
        """Serialize file list, build info, types and op groups to JSON."""
        def toJson(v):
            # custom encoder hook: ASL objects serialize via their toDict()
            if isinstance(v, (ASLBitField, ASLBitFieldType, ASLGroup, ASLEncoding, ASLOperandInfo, ASLException)):
                return v.toDict()
            else:
                raise TypeError('Unserializable object!')

        with open(json_file, 'w', encoding='utf-8') as fout:
            d = {'FileList':self.mFileList, 'BuildInfo':self.mBuildInfo, 'BitFieldTypes':self.mTypes, 'OpGroups':self.mOpGroups}
            json.dump(d, fout, sort_keys=True, indent=2, default=toJson)

    def loadFromJson(self, json_file:str):
        """Not implemented yet."""
        pass

    def saveAsPickle(self, pickle_file:str):
        """Pickle all members except mSHAObj (hashlib objects cannot be
        pickled)."""
        import pickle

        d = {}
        exempt_member = {'mSHAObj'}

        for k, v in self.__dict__.items():
            if k not in exempt_member:
                d[k] = v

        with open(pickle_file, 'wb') as fout:
            pickle.dump(d, fout)

# matches the "](name.md)" / "](name.md#anchor)" tail of a markdown link;
# used by cvtMDLinks() to rewrite .md link targets to .html
p_mdlink = re.compile(r'\]\(\w+\.md(#\w+)?\)')

## this method is used to add metadata for external asset markdown files
def generateMetaForMdAssets(fname: str, tag: str = None) -> str:
    '''Generate metadata for an external asset markdown file.

    The original markdown file contains no metadata field at all.
    input: fname --- original markdown file full path
    return: markdown file content with a prefixed metadata field
    '''
    ## read the asset file as-is (it carries no metadata yet)
    with open(fname, 'r', encoding='utf-8') as fin:
        original = fin.read()
    ## build the three metadata fields: HASH, TIME and LATEST_VERSION
    metadata = {
        'HASH': get_content_sha256(original),
        'TIME': time.strftime('%Y.%m.%d %H:%M:%S'),
        'LATEST_VERSION': tag if tag is not None else '',
    }
    ## prefix the metadata field to the content
    return ASLData.embedMeta(metadata, original)

def cvtMDLinks(s):
    '''Convert markdown link targets from .md to .html.

    e.g. [ALL2](ALL.md#ALL2) becomes [ALL2](ALL.html#ALL2).

    NOTE 1: only ".md" inside a recognized link tail is converted;
            other ".md" occurrences in the text are left untouched.
    NOTE 2: the replacement is simple and direct — the matched link
            tail has its ".md" swapped for ".html".
    '''
    # Rewriting every match in a single pass is equivalent to the old
    # one-at-a-time re.sub loop: a rewritten link (".html") can no
    # longer match p_mdlink, and matches never contain backslashes.
    return p_mdlink.sub(lambda m: m.group().replace('.md', '.html'), s)

def md2html(md_name, html_name, style_file='styles.css'):
    """Convert one markdown file (with an embedded metadata header) to an HTML page.

    :param md_name:    source markdown path; its metadata header was written by
                       ASLData.embedMeta / generateMetaForMdAssets
    :param html_name:  destination html file path
    :param style_file: stylesheet referenced from the generated page
    """
    print(f"Converting {md_name} => {html_name}...")
    # Fix: read via a context manager instead of a leaked open() handle.
    with open(md_name, 'r', encoding='utf-8') as fin:
        file_content = fin.read()
    ## split meta data from content
    md_metadata, md_content = ASLData.extractMeta(file_content)
    ## rewrite intra-doc .md links to .html before rendering
    r_content = cvtMDLinks(md_content)

    # Convert markdown to HTML
    html_content = markdown.markdown(r_content, extensions=['tables', 'fenced_code', 'codehilite'])

    ## HTML <meta> entries mirroring the markdown metadata header.
    ## ASL provenance (ASL_HASH/ASL_TIME) only exists for auto-generated pages;
    ## keep the original tag order: HASH, TIME, [ASL_HASH, ASL_TIME,] LATEST_VERSION.
    meta_entries = [
        ('HASH', md_metadata.get('HASH', "")),
        ('TIME', md_metadata.get('TIME', "")),
    ]
    if 'ASL_HASH' in md_metadata:
        meta_entries.append(('ASL_HASH', md_metadata['ASL_HASH']))
        meta_entries.append(('ASL_TIME', md_metadata['ASL_TIME']))
    meta_entries.append(('LATEST_VERSION', md_metadata.get('LATEST_VERSION', "")))

    meta_tags = '\n'.join(f'\t<meta name="{name}" content="{content}">'
                          for name, content in meta_entries)

    ## create the complete HTML page (single template; the previous version
    ## duplicated it across an if/else, emitted stray indentation and doubled
    ## newlines, and never closed the <html> element)
    html_page = (
        '<!DOCTYPE html>\n'
        '<html lang="zh">\n'
        '<head>\n'
        '\t<meta charset="utf-8">\n'
        f'{meta_tags}\n'
        f'\t<link rel="stylesheet" href="{style_file}">\n'
        '</head>\n'
        f'<body>{html_content}</body>\n'
        '</html>\n'
    )

    ## write HTML page to file
    with open(html_name, 'w', encoding='utf-8') as file:
        file.write(html_page)

def batch_cvt(md_dir, html_dir, rebuild=False):
    """Convert every *.md file under md_dir into a same-named *.html under html_dir.

    Unless *rebuild* is set, a file is skipped when its html output already
    exists and is newer than the markdown source (incremental build).
    """
    checkDirExistence(html_dir)
    for src in glob.iglob(os.path.join(md_dir, "*.md")):
        stem = os.path.split(src)[1][:-3]
        dst = os.path.join(html_dir, stem + '.html')
        ## skip when the existing html is already newer than its source
        up_to_date = (not rebuild
                      and os.path.exists(dst)
                      and os.path.getmtime(src) < os.path.getmtime(dst))
        if up_to_date:
            continue
        md2html(src, dst)

if __name__ == '__main__':
    ## Command-line driver: parse *.asl sources from `indir` and emit every
    ## build artifact (json, pictures, xlsx, markdown, html, pickle) under `outdir`.
    parser = argparse.ArgumentParser(prog='asl_parser')
    # NOTE(review): help text says "current directory" but the actual default is 'asl'.
    parser.add_argument('indir', type=str, nargs='?', default='asl', help='Input dir. Default to current directory.')
    parser.add_argument('-o', '--output', dest='outdir', default='build', help='Output dir.')
    parser.add_argument('--rebuild', dest='rebuild', action='store_true', help='Rebuild all files.')
    parser.add_argument('--tag', dest='tag', type=str, help='Git tag for metadata update.')
    args = parser.parse_args()
    checkDirExistence(args.outdir)

    ## parse all ASL input files, then snapshot the raw parse before post-processing
    ad = ASLData()
    ad.parseFiles(os.path.join(args.indir, '*.asl'))
    ad.generateHashInfo()
    ad.saveAsJson(os.path.join(args.outdir, 'asl_orig.json'))
    ad.doPostProcess(args.outdir)

    ## emit modifier / function summaries
    ad.generateOpModis(os.path.join(args.outdir, 'opmodi.txt'))
    ad.generateOpFuncs(os.path.join(args.outdir, 'opfunc.txt'))

    ## encoding artifacts, plus a second json snapshot after post-processing
    ad.generateEncodingPics(args.outdir, args.rebuild)
    ad.saveEncodingAsXLSX(os.path.join(args.outdir, 'encoding.xlsx'))
    ad.saveAsJson(os.path.join(args.outdir, 'asl_proc.json'))
    ## generate markdown files
    md_dir = os.path.join(args.outdir, 'md')
    html_dir = os.path.join(args.outdir, 'html')
    ad.generateMarkdown(md_dir, tag=args.tag, rebuild=args.rebuild)

    ## copy all .md files from doc directory to output directory's md subdirectory
    ## because these assets will be referenced by automatically generated md/html files
    ## metadata field will also be embedded for these assets
    doc_dir = 'doc'
    if os.path.exists(doc_dir) and os.path.isdir(doc_dir):
        print(f'Copying .md files from {doc_dir} to {md_dir}...')
        for file in os.listdir(doc_dir):
            if file.endswith('.md'):
                src_path = os.path.join(doc_dir, file)
                dst_path = os.path.join(md_dir, file)
                ## load original file content and embed metadata
                md_with_meta = generateMetaForMdAssets(src_path, args.tag)
                ## write to dst file path
                with open(dst_path, 'w', encoding='utf-8') as fout:
                    fout.write(md_with_meta)
                ## notification to user
                print(f'Copied {file} to {md_dir}')
    else:
        print(f'Doc directory {doc_dir} does not exist or is not a directory. Skipping file copy.')

    ## persist the full parsed object, then render every md file to html
    ad.saveAsPickle(os.path.join(args.outdir, 'asl_data.pickle'))
    ## generate html files for all md files (including copied external assets)
    batch_cvt(md_dir, html_dir, args.rebuild)

    
