# 
# 2020.09.12    Tam        remove option -binMap, hardcode binMap name in script
#                          when datafile is empty, raise 'empty datafile ' err instead of SN_NUMBER
# 2020.09.13    Hiroshi    Applied regex to capture PROD, LOT, TIME info from both production data file and historical data file (*1)
# 2020.09.16    Tam        Added empty file check during loop of record
# 2020.09.24    Tam        Fix a bug: endless loop when datafile has multiple segs
# 2020.09.25    Tam        remove useless code, add some argument options

import struct
import re, os, sys, gzip
import time
import io
import traceback
import argparse
from TOOLS import *

parser = argparse.ArgumentParser()
parser.add_argument('DAT_FIL', help='datafile filename')
# fix: -debug was required=True even though its help text promises "default 0";
# making it optional with default=0 is backward-compatible with existing callers
parser.add_argument('-debug', type=int, default=0,
                    help='if set to 1, keep temp file and print more info. default 0')
parser.add_argument('-FT_BIN_MAP', help = 'bin mapping file for FT, default = CSVFinalTestsFileCombinedForDatalog20200522.csv',
                    default = 'CSVFinalTestsFileCombinedForDatalog20200522.csv')
args = parser.parse_args()

# derived file paths: temp csv dump and program-info file; FIN_OUT is filled in by main()
DAT_FIL = args.DAT_FIL
TMP_FIL = DAT_FIL + '.csv.tmp1'
PRG_FIL = DAT_FIL + '.prg'
FIN_OUT = ''
GLB = {}  # shared parse-state dict, populated while walking the datafile


def main():
    """Parse a FETTEST binary datafile (.dat / .dat.gz) into a .res report.

    Pipeline:
      1. read the 1536-byte header record (record 0: summary information),
      2. walk every die (SN) of every following record, dumping each parsed
         field as one csv-ish line of TMP_FIL,
      3. re-read TMP_FIL and reorganize it into the final FIN_OUT report,
      4. write PRG_FIL with values needed by downstream scripts.

    Raises on malformed filenames and truncated/empty datafiles; cleanup of
    partial outputs is handled by the __main__ guard.
    """
    global FIN_OUT
    BIN_MAP_DIR = os.path.join(os.getenv("DPREF"), "dat2res")  # requires $DPREF to be set
    BIN_MAP = os.path.join(BIN_MAP_DIR, args.FT_BIN_MAP)
    
    ## define the length of data type in 'struct' module.
    # script will generate all these classes with corresponding data length.
    # class i with method RW, i.RW will read 4 bytes and parse it as 'int' data type.
    # I is unsigned int. l: long int. L: unsigned long int. f: float. s: string. c: char. b: byte. h: short int. H: unsigned short int.
    # NOTE(review): the l/L/q entries assume fixed sizes while struct.unpack uses
    # native sizes by default -- harmless today because only c/B/H/I/f are instantiated.
    type_table = ['s', 'c', 'b', 'B', 'h', 'H', 'i', 'I', 'l', 'L', 'q', 'Q', 'e', 'f', 'd']
    len_table = [1, 1, 1, 1, 2, 2, 4, 4, 4, 8, 8, 8, 2, 4, 8]
    #####################################################################################################################
    # fix: escape the dots -- r'(.gz|.GZ)' also matched names like 'x.agz'
    M = re.findall(r'^(.*)(\.gz|\.GZ)$', DAT_FIL)
    if not M:
        BASE_NAME = os.path.basename(DAT_FIL)
    else:
        BASE_NAME = os.path.basename(M[0][0])
    DIR_NAM = os.path.dirname(DAT_FIL)
    # file name e.g. SQ4940AEY-T1-GE3-V_P01D449.10_FT_2020051511324800.dat.gz
    # historical data e.g. SQ2361ES-T1_GE3-V_J52D257.7_QA_20180309064646.dat.gz
    
    match = re.search(r'^(\S+)_(\S+)_(FT|QA)_(\d{14})\.(dat|DAT)(\.gz)?$', BASE_NAME)
    if match:
        PRODUCT_ID = match.group(1)
        LOT_ID = match.group(2)
        TYPE_ID = match.group(3)
        TIME = match.group(4)  # for FT (hist) always has timestamp
    else:
        exit_for_loader(1, "incorrect filename")
    
    BASE_NAME = '_'.join([PRODUCT_ID, LOT_ID, TYPE_ID, TIME]) + '.res'
    FIN_OUT = os.path.join(DIR_NAM, BASE_NAME)  # output fileName
    
    
    # bin_mapping csv; columns used below:
    #   [0] OTI enabled flag, [2] '/'-joined test numbers, [7] bin number, [8] bin name
    bin_mapping_list = []
    with open(BIN_MAP) as BIN:
        BIN.readline()  # skip the csv header line
        for line in BIN:
            line = line.strip()
            bin_mapping_list.append(line.split(','))
    
    def find_name_by_bin(bin_num):
        """Return the bin name for bin_num; PASS bins are hardcoded."""
        if GLB['wafer_map_exist']:
            if bin_num == 1:
                return 'PASS'
        else:
            if bin_num in (1, 2):
                return 'PASS'
        for i in bin_mapping_list:
            if int(i[7]) == bin_num:
                return i[8]
        raise Exception(f"can not find name by bin number {bin_num}\n")
    
    def find_name_by_test_num(test_num):
        """Return the bin name that owns test_num."""
        for i in bin_mapping_list:
            tmp = [int(j) for j in i[2].split('/')]
            if test_num in tmp:
                return i[8]
        raise Exception(f"can not find name by test number {test_num}\n")
    
    def find_bin_by_test_num(test_num):
        """Return the bin number that owns test_num."""
        for i in bin_mapping_list:
            tmp = [int(j) for j in i[2].split('/')]
            if test_num in tmp:
                return int(i[7])
        raise Exception(f"can not find bin number by test number {test_num}\n")
    
    def find_enabled_by_test_num(test_num):
        """Return the OTI enabled flag (column 0) for test_num."""
        for i in bin_mapping_list:
            test_num_group = i[2].split('/')
            test_num_group = [int(n) for n in test_num_group]
            if test_num in test_num_group:
                return int(i[0])
        raise Exception(f'can not find enabled_value by test number {test_num}\n')
    
    # func
    def output(name, unpacked, data, d_binary=False, bin_len=1):
        """Write one 'name,value,hex,binary' line to the tmp file W."""
        data1 = data.hex()
        data2 = bin(int(data1, 16))[2:].rjust(8 * bin_len, "0")
        if d_binary:
            unpacked = data2
        W.write(f'{name},{unpacked},{data1},{data2}\n')
    
    def b2int(data):
        """Interpret data (bytes) as a big-endian unsigned integer."""
        return int(data.hex(), 16)
    
    # def type class
    ## CreateBasicClass will generate all classes defined in 'type_table'
    # RW method will READ data with size self.length and WRITE parsed data to tmp1 file.
    def CreateBasicClass(tp_name):
        """Build a reader class for one struct format code (closure over R/W)."""
        class Basic:
            def __init__(self, name):
                self.name = name
                self.tp_name = tp_name
                indx = type_table.index(tp_name)
                self.length = len_table[indx]
    
            def RW(self):
                """Read self.length bytes, unpack, dump to the tmp file, return the value."""
                data = R.read(self.length)
                if self.tp_name == 'c':
                    unpacked = struct.unpack(self.tp_name, data)[0].decode()
                else:
                    unpacked = struct.unpack(self.tp_name, data)[0]
                output(self.name, unpacked, data, bin_len = self.length)
                return unpacked
        return Basic
    
    # inject one reader class per format code into globals: c, B, H, I, f, ...
    for j in type_table:
        globals()[j] = CreateBasicClass(j)
    
    #class TP_SKIP is used to simply skip empty data.
    class TP_SKIP:
        def __init__(self, d_length):
            self.tp_name = 'TP_SKIP'
            self.length = d_length
    
        def RW(self):
            R.read(self.length)
    
    #class TP_STRING parses string type data: one length-prefix byte followed by
    #the text, padded to a fixed field size. The field named DATE is special-cased.
    class TP_STRING:
        def __init__(self, name, d_length):
            self.name = name
            self.tp_name = 'TP_STRING'
            self.length = d_length
    
        def RW(self):
            data = R.read(1)
            if self.name == 'DATE':  # use timestamp in filename as DATE
                N = re.search(r'^(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d*)$', TIME)
                now = f'{N[1]}-{N[2]}-{N[3]} {N[4]}:{N[5]}:{N[6]}'
                GLB['time'] = now
                output(self.name, now, b'\x00')
                R.read(self.length - 1)  # skip the rest of the field
            elif data.hex() == '00':
                # zero length prefix -> empty string
                output(self.name, 'nan', b'\x00')
                R.read(self.length - 1)
            else:
                length_record = struct.unpack('B', data)[0]
                data2 = R.read(length_record)
                unpacked = struct.unpack(f'{length_record}s', data2)[0].decode()
                output(self.name, unpacked, data)
                R.read(self.length - 1 - length_record)  # skip the padding
                return unpacked
    
    # self-defined type
    ## For below classes, pls refer to 'DS_FET_HVM (PARSER SPEC).docx'
    #e.g. There is field name DFG1 in the .docx, so creating a corresponding class TP_DFG1.
    #instance = TP_DFG1('a_name') instance.RW() will read 1 byte and parse it as the rule defines in .docx.
    #important classes: TP_DFG1, TP_SN_OR_WM, TP_RSLTF.
    #TP_DFG1:   wafer_map_exist = True or False
    #           long_SN_enabled = True or False(Series number = 2 if False else 4)
    #          'Series number' is in docx page15. 
    #TP_SN_OR_WM:
    #           SN_NUMBER(SN)
    #TP_RSLTF:  PF(pass or fail)
    
    class TP_DFG1:
        """Data-format flags byte: bit0 wafer map, bit1 convert2V12, bit2 long SN."""
        def __init__(self, name):
            self.name = name
            self.tp_name = 'TP_DFG1'
            self.length = 1
    
        def RW(self):
            data = R.read(1)
            data_int = b2int(data)
            bit0 = data_int & 0b00000001
            bit1 = (data_int & 0b00000010) >> 1
            bit2 = (data_int & 0b00000100) >> 2
            GLB['wafer_map_exist'] = bool(bit0)
            GLB['convert2V12'] = bool(bit1)
            GLB['long_SN_enabled'] = bool(bit2)
            W.write(f'{self.name}\n')
            output('wafer_map_exist', GLB['wafer_map_exist'], data)
            output('convert2V12', GLB['convert2V12'], data)
            output('long_SN_enabled', GLB['long_SN_enabled'], data)
    
    class TP_BCD:
        """Binary-coded-decimal counter: two decimal digits packed per byte."""
        def __init__(self, name, length):
            self.name = name
            self.tp_name = 'TP_BCD'
            self.length = length
    
        def RW(self):
            total = ''
            for i in range(self.length):
                data = R.read(1)
                num = b2int(data)
                # fix: the high nibble must be shifted right by 4 (was >> 3,
                # which doubled the digit and could yield two-digit values)
                num1 = (num & 0b11110000) >> 4
                num2 = num & 0b00001111
                total = total + str(num1) + str(num2)
            output(self.name, total, data)  # note: only the last raw byte is dumped
            return total
    
    class TP_BLOS:
        """Bin open/close flag (bit0) and reject flag (bit1) byte."""
        def __init__(self, name):
            self.name = name
            self.tp_name = 'TP_BLOS'
            self.length = 1
    
        def RW(self):
            data = R.read(1)
            data_int = b2int(data)
            bit0 = data_int & 0b00000001
            bit1 = (data_int & 0b00000010) >> 1
            GLB['BIN_OPEN_CLOSE'] = 'OPEN' if bit0 == 0 else 'CLOSE'
            GLB['BIN_REJECT'] = True if bit1 == 1 else False
            W.write(f'{self.name}\n')
            output('BIN_OPEN_CLOSE', GLB['BIN_OPEN_CLOSE'], data)
            output('BIN_REJECT', GLB['BIN_REJECT'], data)
    
    class TP_SN_OR_WMP:
        """Serial number (2 or 4 bytes) or wafer-map x/y coordinates of a die."""
        def __init__(self, name):
            self.name = name
            self.tp_name = 'TP_SN_OR_WMP'
            self.length = None

        def RW(self, SN_CNT=[1]):
            # SN_CNT is a *deliberately* mutable default argument: a counter that
            # persists across calls, used when the SN comes from a wafer map.
            W.write(f'{self.name}\n')
            self.length = 4 if GLB['long_SN_enabled'] else 2
            if 'SN_NUMBER' not in GLB.keys():
                GLB['SN_NUMBER'] = []
                
            if GLB['wafer_map_exist'] and not GLB['long_SN_enabled']:
                B('xcoord').RW()
                B('ycoord').RW()
                # fix: the newline was missing, which glued the following
                # tmp-file line onto this one and broke the reorganize parser
                W.write(f"SN_NUMBER,{SN_CNT[0]}\n")
                GLB['SN_NUMBER'].append(SN_CNT[0])
                SN_CNT[0] += 1
            elif not GLB['wafer_map_exist'] and not GLB['long_SN_enabled']:
                tmp = H('SN_NUMBER').RW()
                GLB['SN_NUMBER'].append(int(tmp))
            elif not GLB['wafer_map_exist'] and GLB['long_SN_enabled']:
                tmp = I('SN_NUMBER').RW()           # If long_SN_enabled, SN_NUMBER size = 4 bytes, else 2 bytes. check len_table.
                GLB['SN_NUMBER'].append(int(tmp))
            elif GLB['wafer_map_exist'] and GLB['long_SN_enabled']:
                raise Exception('does not exist this case: wafer_map_exist = True, long_SN_enabled = True')
            else:
                raise Exception('Error, Serial Number & wafer mapping')
    
    class TP_BIN_RESULT:
        """Hardware bin number: low 5 bits of one byte (dumped, not used later)."""
        def __init__(self, name):
            self.name = name
            self.tp_name = 'TP_BIN_RESULT'
            self.length = 1
    
        def RW(self):
            data = R.read(self.length)
            bit0_4 = b2int(data) & 0b11111
            W.write(f'{self.name}\n')
            output('hardBin', bit0_4, data)
    
    class TP_RSLTF:
        """Per-test result flags byte; bit3 is the fail flag (F when set)."""
        def __init__(self, name):
            self.name = name
            self.tp_name = 'TP_RSLTF'
            self.length = 1
    
        def RW(self):
            data = R.read(1)
            data_int = b2int(data)
            bit3 = (data_int & 0b00001000) >> 3
            self.PF = 'F' if bit3 else 'P'
            output('\tPF', self.PF, data)
            return data
    
    # field readers for record 0 (header) and the per-die records
    name1 = c('name1')
    name2 = c('name2')
    SSN = H('SSN')
    ESN = H('ESN')
    SNNUM = B('SNNUM')
    SNSIZE = H('SNSIZE')
    TEST = B('TEST')  # 32
    FCN = B('FCN')  # 32
    REM = TP_STRING('REM', 40)
    DATE = TP_STRING('DATE', 40)  # set script runtime as date/time
    START_SEG = B('START_SEG')
    END_SEG = B('END_SEG')
    DFG1 = TP_DFG1('DFG1')
    SCX = B('SCX')
    SCY = B('SCY')
    ECX = B('ECX')
    ECY = B('ECY')
    SPARE = TP_SKIP(4)
    run_filename = TP_STRING('run_filename', 15)
    test_filename = TP_STRING('test_filename', 15)
    BCNT = TP_BCD('BCNT', 4)  # 25
    BLOS = TP_BLOS('BLOS')  # 25
    RSCNT = TP_BCD('RSCNT', 4)  # 25
    RTCNT = TP_BCD('RTCNT', 4)
    LSSN = I('LSSN')
    LESN = I('LESN')
    SN_OR_WMP = TP_SN_OR_WMP('SN_OR_WMP')
    bin_result_bits = TP_BIN_RESULT('bin_result_bits')
    RSLTF = TP_RSLTF('RSLTF')
    RES = f('RES')
    
    # slurp the whole (decompressed) file into memory for random seeks;
    # fix: the original left the source file handle unclosed
    opener = gzip.open if M else open
    with opener(DAT_FIL, 'rb') as SRC:
        R = io.BytesIO(SRC.read())
    
    GLB['FileSize'] = len(R.read())
    # records are fixed 1536-byte blocks; a non-multiple size means truncation.
    # fix: explicit raises instead of assert (asserts are stripped under -O)
    if GLB['FileSize'] % 1536 != 0:
        raise Exception("incomplete file")
    if GLB['FileSize'] == 0:
        raise Exception('Empty file')
    
    ## parse record0: summary information
    R.seek(0, 0)
    W = open(TMP_FIL, 'w')
    section0 = [name1, name2, SSN, ESN, SNNUM, SNSIZE, TEST, FCN, REM, \
                DATE, START_SEG, END_SEG, DFG1, SCX, SCY, ECX, ECY, SPARE, \
                run_filename, test_filename, 'BCNT_BLOS', RSCNT, RTCNT]
    
    for i in section0:
        if i == TEST:
            # 32 test-number slots; 0 means the slot is unused
            GLB['TEST'] = []
            for j in range(32):
                test_number = i.RW()
                if test_number != 0:
                    GLB['TEST'].append(test_number)
        elif i == FCN:
            GLB['FCN'] = []
            for j in range(32):
                FCN_number = i.RW()
                GLB['FCN'].append(FCN_number)
        elif i == 'BCNT_BLOS':
            # 25 interleaved bin-count / bin-flags pairs
            for j in range(25):
                BCNT.RW()
                BLOS.RW()
        elif i == RSCNT:
            for j in range(25):
                i.RW()
        elif i == test_filename:
            GLB['test_filename'] = test_filename.RW()
        elif i in [SSN, ESN, SNNUM, SNSIZE, REM, DATE, START_SEG, END_SEG, SCX, \
                   SCY, ECX, ECY, run_filename]:
            GLB[i.name] = i.RW()
        else:
            i.RW()
    if GLB['long_SN_enabled']:
        LSSN.RW()
        LESN.RW()
    
    ## skip current die, go to the next one: the tail of each die (SN) may
    # contain an empty area of uncertain size.
    # SN_start: file offset where the current die starts.
    # die_cnt: 1-based die index within the current record.
    def next_SN(SN_start, die_cnt):
        if die_cnt % GLB['SNNUM'] != 0:
            R.seek(SN_start + GLB['SNSIZE'], 0)  # next die
        else:
            # last die of the record: also skip the record's tail padding
            tmp = 1536 - GLB['SNSIZE'] * GLB['SNNUM']
            R.seek(SN_start + GLB['SNSIZE'] + tmp, 0)  # next record
    
    SN_LENGTH = 4 if GLB['long_SN_enabled'] else 2
    R.seek(1536, 0)
    record_cnt = 0
    #parse record 1..N
    while 1:#record
        if R.tell() == GLB['FileSize']:
            break
    
        record_cnt += 1
        # sanity-check header values before walking dies; all-zero header
        # values indicate an effectively empty datafile
        if GLB['SNNUM'] == 0 or GLB['SNSIZE'] == 0 \
            or (GLB['START_SEG'] == GLB['END_SEG'] == 0) \
            or ('run_filename' not in GLB.keys() or GLB['run_filename'] is None) \
            or ('test_filename' not in GLB.keys() or GLB['test_filename'] is None) \
            or ('long_SN_enabled' not in GLB.keys() or GLB['long_SN_enabled'] is None):
            raise Exception("Empty file")
        W.write(f'record,{record_cnt}\n')
    
        for die_cnt in range(1, GLB['SNNUM'] + 1):
            SN_start = R.tell()
            ## If the SN header of a die is empty, the die is empty: skip it.
            data = R.read(SN_LENGTH)
            R.seek(-1 * SN_LENGTH, 1)
            if b2int(data) == 0:
                next_SN(SN_start, die_cnt)
                continue
            SN_OR_WMP.RW()
            bin_result_bits.RW()
            seg_count = GLB['END_SEG'] - GLB['START_SEG'] + 1
            
            ##seg  test     res
            # In TMP_FIL:
            # 0     0        XX
            # 1     0        0(empty)
            # 0     1        0
            # 1     1        XX
            # 0     2        XX
            # 1     2        0
            # 0     3        0
            # 1     3        XX ...
            for i in range(len(GLB['TEST'])):
                for j in range(seg_count):
                    print(f"seg,{j}",file=W)
                    print(f"test_seq,{i}",file=W) #start from 0 
                    RSLTF.RW()
                    RES.RW()
            next_SN(SN_start, die_cnt)
    R.close()
    W.close()

    # reorganize the raw tmp dump into the final report
    with open(FIN_OUT, 'w') as W:
        with open(TMP_FIL, 'r') as R:  # record res, PF in temp_csv_outfile
            GLB['last_PF'] = {}
            GLB['RES_PER_SN'] = {}
            GLB['SKIP_TEST_SEQ'] = []
            for line in R:
                splited = line.strip().split(',')
                if len(splited) >= 1 and splited[0] == 'SN_NUMBER':
                    i = int(splited[1])
                    GLB['last_PF'][i] = []
                    GLB['RES_PER_SN'][i] = {}
                elif splited[0] == 'PF':
                    PF_TMP = splited[1]
                elif splited[0] == 'seg':
                    SEG_TMP = int(splited[1])
                    if SEG_TMP not in GLB['RES_PER_SN'][i].keys():
                        GLB['RES_PER_SN'][i][SEG_TMP] = {}
                elif splited[0] == 'test_seq':
                    TEST_SEQ_TMP = int(splited[1])
                elif splited[0] == 'RES':
                    # a non-zero RES means this test actually ran in this seg
                    if splited[1] != '0.0':
                        GLB['RES_PER_SN'][i][SEG_TMP][TEST_SEQ_TMP] = splited[1]
                        GLB['last_PF'][i] = PF_TMP

        # for multiple SEGs case, union all the SEGs result
        # dict structure: GLB['UNION_RES'][SN_NUMBER (die number)][TEST order(test_seq, not test number)]
        #                 GLB['RES_PER_SN'][SN_NUMBER][SEG number][TEST order(test_seq)]
        GLB['UNION_RES'] = {}
        for SN_TMP in GLB['RES_PER_SN'].keys():
            GLB['UNION_RES'][SN_TMP] = {}
            for SEG_TMP in GLB['RES_PER_SN'][SN_TMP].keys():
                GLB['UNION_RES'][SN_TMP].update(GLB['RES_PER_SN'][SN_TMP][SEG_TMP])
        # if a TEST is not executed, set the test res = 'NA'
        for SN_TMP in GLB['UNION_RES'].keys():
            for i in range(len(GLB['TEST'])):
                if i not in GLB['UNION_RES'][SN_TMP].keys():
                    GLB['UNION_RES'][SN_TMP][i] = 'NA'

        # remove trailing 'NA' placeholders
        for SN_TMP in GLB['UNION_RES'].keys():
            for i in reversed(range(len(GLB['TEST']))):
                if GLB['UNION_RES'][SN_TMP][i] != 'NA':
                    break
                else:
                    del GLB['UNION_RES'][SN_TMP][i]
        
        GLB['program'] = '.'.join(GLB['test_filename'].split('.')[:-1])
        soft_bin_list = []
        GLB['failedTestNumber'] = {}
        if 'SN_NUMBER' not in GLB.keys():
            raise Exception('empty datafile')
        for SN in GLB['SN_NUMBER']:
            # the soft bin is derived from the last executed test of the die
            tmp = max( GLB['UNION_RES'][SN].keys() )
            last_test_num = GLB['TEST'][tmp]
            soft_bin_num_tmp = 1 if GLB['last_PF'][SN] == 'P' else find_bin_by_test_num(last_test_num)
            GLB['failedTestNumber'][SN] = 0 if GLB['last_PF'][SN] == 'P' else last_test_num
            soft_bin_list.append(soft_bin_num_tmp)
        
        W.write('<BOH>\n')
        W.write(f'StartTime,{GLB["time"]}\n')
        W.write('StartTimeFmt,%Y-%m-%d %H:%M:%S\n')
        W.write(f'EndTime,{GLB["time"]}\n')
        W.write('EndTimeFmt,%Y-%m-%d %H:%M:%S\n')
        W.write('<BOH>\n\n')
        
        W.write('<BOSB>\n')
        W.write('binNumber,binName,PF,count\n')
        # soft_bin_list stores the failed bin num of each SN(die).
        for i in sorted(set(soft_bin_list), key = lambda x : int(x)):
            soft_bin_name_tmp = find_name_by_bin(i)
            PF_tmp = 'P' if soft_bin_name_tmp == 'PASS' else 'F'
            W.write(f'{i},{soft_bin_name_tmp},{PF_tmp},{soft_bin_list.count(i)}\n')
        W.write('<EOSB>\n\n')
        
        W.write('<BOR>\n')
        W.write('#Test_name,test_num,low_limit,high_limit\n')
        for i in GLB['TEST']:
            W.write(f'{find_name_by_test_num(i)},{i}\n')
        
        W.write('program,TesterType,PRODUCT_ID,LOT_ID,failedTestNumber,SN,softBin#,softBinName,RES')
        for i in GLB['SN_NUMBER']:
            if GLB['last_PF'][i] == 'P':
                bin_num_tmp = 1
                bin_name_tmp = 'PASS'
            else:
                tmp = max(GLB['UNION_RES'][i].keys())
                test_num = GLB['TEST'][tmp]
                bin_num_tmp = find_bin_by_test_num(test_num)
                bin_name_tmp = find_name_by_test_num(test_num)
            
            W.write(f'\n{GLB["program"]},FETTEST,{PRODUCT_ID},{LOT_ID},{GLB["failedTestNumber"][i]},{i},{bin_num_tmp},{bin_name_tmp},')
            #write all the res of current die(SN, i)
            for j in sorted(GLB['UNION_RES'][i].keys()):
                W.write(f"{GLB['UNION_RES'][i][j]},")

        W.write('\n<EOR>')
    # some values should be passed to other scripts.
    with open(PRG_FIL, 'w') as W:
        print(GLB['program'], file=W)
        print(FIN_OUT, file=W)
        print(BIN_MAP, file=W)
#In debug mode, print traceback, keep temp file. Otherwise delete them.
# In debug mode keep the temp file; otherwise remove the (possibly partial)
# output files and the temp file. Any failure is re-raised as a plain
# Exception carrying the repr of the original error for the loader.
if __name__ == '__main__':
    try:
        main()
    except BaseException as ins:  # BaseException on purpose: also wraps SystemExit from exit_for_loader
        if not args.debug:
            rm_file(FIN_OUT)
            rm_file(PRG_FIL)
        # fix: both branches raised the identical exception -- dedupe, and
        # chain with 'from ins' so the original traceback is preserved
        raise Exception(repr(ins)) from ins
    finally:
        if not args.debug:
            rm_file(TMP_FIL)
