# -*- coding: utf-8 -*-
import re
from pathlib import Path

import pandas as pd

class Merge:
    """Parse tester log (.txt) files and merge them into a single CSV.

    Each log file contains one or more "DAI" blocks delimited by
    ``sot ===...>`` / ``eot ===...<`` marker lines.  Measurement lines inside
    a block are keyed by ``"<TestName> <Signal>"`` and accumulated across
    files; :meth:`toCSV` then writes one Result/Measure column pair per
    parsed block.
    """

    def __init__(self):
        # Number of DAI blocks parsed so far; doubles as the per-block index.
        self.dai_cnt = 0
        self.df = pd.DataFrame()
        # "<TestName> <Signal>" -> {"initial_data": [...], dai_idx: [pass, measure]}
        self.data_dict = {}
        # dai_idx -> reordered 7-number summary found after the "eot" marker
        self.result_dict = {}
        self.columns = ['TestName', 'Signal', 'NO', 'Site', 'LowLimit', 'HighLimit', 'Force', 'Measure']
        # dai_idx -> source file path (shown in the FileName header row)
        self.update_path = {}
        self.mergeDf = pd.DataFrame()

    def txtToCsv(self, file_path):
        """Parse one log file, accumulating its DAI blocks into the dicts."""
        # Compiled once: matches the 7-number summary line after "eot".
        result_regex = re.compile(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)')
        header_position = 'sot ============================================================>'
        end_position = 'eot ============================================================<'
        dai_start_flag = False
        dai_end_flag = False
        self.update_path[self.dai_cnt] = file_path

        with open(file_path, "r") as file:
            for line in file:
                lineData = line.strip()
                if not lineData:
                    continue

                # Start of a DAI block.
                if not dai_start_flag and header_position in lineData:
                    dai_start_flag = True
                    dai_end_flag = False

                if not dai_start_flag:
                    continue

                # Glue a number to its unit ("1.2 mA" -> "1.2mA") so the
                # whitespace split below yields one field per column.
                modified_data = re.sub(r'(\d)\s([munAVμ%])', r'\1\2', lineData)
                data_list = modified_data.split()

                # End marker seen: the summary numbers follow on a later line.
                if not dai_end_flag and end_position in modified_data:
                    dai_end_flag = True

                if dai_end_flag:
                    result = result_regex.findall(modified_data)
                    if result:
                        # Reorder so the last two numbers (coordinates) lead.
                        xiu_res = [result[0][i] for i in [5, 6, 0, 1, 2, 3, 4]]
                        self.result_dict[self.dai_cnt] = xiu_res
                        self.dai_cnt += 1
                        dai_start_flag = dai_end_flag = False
                    continue

                # Measurement lines have at least 8 whitespace-separated fields.
                if len(data_list) < 8:
                    continue

                testName, signal = data_list[3], data_list[4]

                # Skip unwanted testName/signal combinations.
                if testName == 'RRAM_READ_ECCBYPASS1_MC' or signal in (
                        'FeRAM_function.dma_mto_scan', 'FeRAM_function.rram_write_eccbypass_mc'):
                    continue
                if (testName, signal) == ('RRAM_PUMP', 'FeRAM_function.rram_pump_search'):
                    continue

                result_pass, measure = data_list[2], data_list[5]
                flag = f"{testName} {signal}"

                if flag in self.data_dict:
                    self.data_dict[flag][self.dai_cnt] = [result_pass, measure]
                else:
                    no, site, lowLimit, highLimit = data_list[0], data_list[1], data_list[6], data_list[7]
                    force = data_list[8] if len(data_list) == 9 else "NAN"
                    # BUG FIX: always store a plain [pass, measure] pair.  The
                    # previous code left-padded this list with '' for flags
                    # first seen after file 0, so toCSV() (which reads indices
                    # 0/1) emitted '' instead of the real first values.
                    self.data_dict[flag] = {
                        "initial_data": [testName, signal, no, site, lowLimit, highLimit, force],
                        self.dai_cnt: [result_pass, measure]
                    }

    def toCSV(self, save_path):
        """Write everything collected so far to ``save_path`` as CSV.

        A ``.txt`` suffix on ``save_path`` is replaced with ``.csv``.
        """
        # The first 8 rows form a header area; the Force column carries the
        # labels for the per-file summary rows.
        columns = {
            'TestName': [''] * 8,
            'Signal': [''] * 8,
            'NO': [''] * 8,
            'Site': [''] * 8,
            'LowLimit': [''] * 8,
            'HighLimit': [''] * 8,
            'Force': ["XCoord", "YCoord", "Site", "Fail", "Total", "Cate", "Bin", "FileName"]
        }

        # One row per TestName/Signal flag, populated from its static fields.
        static_cols = ['TestName', 'Signal', 'NO', 'Site', 'LowLimit', 'HighLimit', 'Force']
        for entry in self.data_dict.values():
            for col, value in zip(static_cols, entry['initial_data']):
                columns[col].append(value)

        # One Result/Measure column pair per parsed DAI block.
        for key in self.result_dict:
            result_i = [''] * 7 + [self.update_path.get(key, '')]
            measure_i = self.result_dict[key] + ['']

            for tNS in self.data_dict:
                pair = self.data_dict[tNS].get(key)
                result_i.append(pair[0] if pair else '')
                measure_i.append(pair[1] if pair else '')

            columns[f'Result{key}'] = result_i
            columns[f'Measure{key}'] = measure_i

        # Column-wise concat tolerates unequal column lengths (NaN padding).
        self.df = pd.concat([pd.DataFrame({k: v}) for k, v in columns.items()], axis=1)
        if save_path.endswith('.txt'):
            save_path = save_path.replace('.txt', '.csv')
        self.df.to_csv(save_path, index=False)


class Connect:
    """Append newly parsed tester log (.txt) files to an existing merged CSV.

    The CSV is expected in the layout produced by ``Merge``: 8 header rows,
    one row per TestName/Signal pair, and one Result/Measure column pair per
    previously merged file.
    """

    def __init__(self, csv_path):
        self.result_dict = {}
        self.df = pd.DataFrame()
        self.data_dict = {}
        self.start_cnt = 0
        self.dai_cnt = 0
        self.csv_path = csv_path
        self.init_data()
        self.new_add_dict = {}

    def init_data(self):
        """Load the CSV and index its existing TestName/Signal rows."""
        self.df = pd.read_csv(self.csv_path, low_memory=False)
        # Header rows have empty TestName/Signal cells (NaN after read_csv).
        select = self.df.dropna(subset=["TestName", "Signal"])

        for flag in (select["TestName"] + " " + select["Signal"]).tolist():
            self.data_dict[flag] = {}

        # Continue numbering after the Result columns already in the CSV
        # (read the header once instead of twice).
        existing = self.get_max_result()
        self.dai_cnt = existing
        self.start_cnt = existing
        self.update_path = {}
        self.new_add_dict = {}

    def txtToCsv(self, txt_path):
        """Parse one log file; known flags go to ``data_dict``, brand-new
        flags (not present in the CSV) go to ``new_add_dict``."""
        # Compiled once: matches the 7-number summary line after "eot".
        result_regex = re.compile(r'(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)')
        header_position = 'sot ============================================================>'
        end_position = 'eot ============================================================<'
        dai_start_flag = False
        dai_end_flag = False
        self.update_path[self.dai_cnt] = txt_path

        with open(txt_path, "r") as file:
            for line in file:
                line_data = line.strip()
                if not line_data:
                    continue

                # Start of a DAI block.
                if not dai_start_flag and header_position in line_data:
                    dai_start_flag = True
                    dai_end_flag = False

                if not dai_start_flag:
                    continue

                # Glue a number to its unit ("1.2 mA" -> "1.2mA") so the
                # whitespace split below yields one field per column.
                modified_data = re.sub(r'(\d)\s([munAVμ%])', r'\1\2', line_data)
                data_list = modified_data.split()

                # End marker seen: the summary numbers follow on a later line.
                if not dai_end_flag and end_position in modified_data:
                    dai_end_flag = True

                if dai_end_flag:
                    result = result_regex.findall(modified_data)
                    if result:
                        # Reorder so the last two numbers (coordinates) lead.
                        xiu_res = [result[0][i] for i in [5, 6, 0, 1, 2, 3, 4]]
                        self.result_dict[self.dai_cnt] = xiu_res
                        self.dai_cnt += 1
                        dai_start_flag = dai_end_flag = False
                    continue

                # Measurement lines have at least 8 whitespace-separated fields.
                if len(data_list) < 8:
                    continue

                test_name, signal = data_list[3], data_list[4]

                # Skip unwanted test_name/signal combinations.
                if test_name == 'RRAM_READ_ECCBYPASS1_MC' or signal in (
                        'FeRAM_function.dma_mto_scan', 'FeRAM_function.rram_write_eccbypass_mc'):
                    continue
                if (test_name, signal) == ('RRAM_PUMP', 'FeRAM_function.rram_pump_search'):
                    continue

                result_pass, measure = data_list[2], data_list[5]
                flag = f"{test_name} {signal}"

                if flag in self.data_dict:
                    if flag in self.new_add_dict:
                        # Flag first seen during this run: keep with its row data.
                        self.new_add_dict[flag][self.dai_cnt] = [result_pass, measure]
                    else:
                        self.data_dict[flag][self.dai_cnt] = [result_pass, measure]
                else:
                    no, site, low_limit, high_limit = data_list[0], data_list[1], data_list[6], data_list[7]
                    force = data_list[8] if len(data_list) == 9 else "NAN"

                    # Mark the flag as known; its values live in new_add_dict
                    # until connectCsv() appends a row for it.
                    self.data_dict[flag] = {}
                    self.new_add_dict[flag] = {
                        "initial_data": [test_name, signal, no, site, low_limit, high_limit, force],
                        self.dai_cnt: [result_pass, measure]
                    }

    def connectCsv(self, save_path):
        """Append new Result/Measure columns (and rows for brand-new
        TestName/Signal pairs) to the DataFrame and save it as CSV."""
        columns = {}
        cnt = self.start_cnt

        for key in self.result_dict:
            result_i = [''] * 7 + [self.update_path.get(key, '')]
            measure_i = self.result_dict[key] + ['']

            # Only flags that already have a CSV row get a cell here.  Flags
            # first seen in the new files are appended as whole rows below.
            # BUG FIX: the old code also emitted '' entries for those new
            # flags, making these columns longer than the DataFrame and
            # leaving ghost NaN rows after the concat.
            for tNS, per_dai in self.data_dict.items():
                if tNS in self.new_add_dict:
                    continue
                pair = per_dai.get(key)
                result_i.append(pair[0] if pair else '')
                measure_i.append(pair[1] if pair else '')

            columns[f'Result{cnt}'] = result_i
            columns[f'Measure{cnt}'] = measure_i
            cnt += 1

        self.df = pd.concat([self.df, pd.DataFrame(columns)], axis=1)

        # Append one full row per brand-new TestName/Signal pair.
        for tNS, data in self.new_add_dict.items():
            # Copy so repeated calls don't keep extending the stored list.
            row = list(data['initial_data'])
            for i in range(self.dai_cnt):
                pair = data.get(i, ['', ''])
                row.append(pair[0])  # result_pass
                row.append(pair[1])  # measure
            self.df.loc[len(self.df)] = row

        self.df.to_csv(save_path, index=False)

    def get_max_result(self):
        """Count how many Result* columns the CSV already contains."""
        header = pd.read_csv(self.csv_path, nrows=0)
        return sum(1 for col in header if "Result" in col)


class DisposeTxt:
    """Extract per-bit measurements from log files and export them to Excel.

    Lines matching ``<row>_<col>_<bit> <value> <unit>`` are collected into
    ``data_dict`` keyed by ``"row_col_bit"``, with one measurement slot per
    processed file ('' where the key did not appear in that file).
    """

    def __init__(self):
        self.df = pd.DataFrame()
        # Captures: row, col, bit, measurement value, unit.
        self.reData = re.compile(r'\d+\s+\d+\s+\S+\s+\S+\s+(\d+)_(\d+)_(\d+)\s+([+-.\d]+)\s*(mA|uA|mV|V|nA)\s+')
        self.data_dict = {}
        # Source files in processing order; one measure slot per file.
        self.files = []
        self.index = 0

        # Sheet layout for save_to_excel: row ranges and value filters.
        self.page_configs = [
            {'start': 0, 'end': 511, 'sheet': 'All Data'},
            {'min_split': 10, 'sheet': '<10'},
            {'max_split': 10, 'sheet': '>=10'},
            {'start': 0, 'end': 1, 'sheet': '0-1'},
            {'start': 2, 'end': 81, 'sheet': '2-81'},
            {'start': 82, 'end': 161, 'sheet': '82-161'},
            {'start': 162, 'end': 256, 'sheet': '162-256'},
            {'start': 257, 'end': 336, 'sheet': '257-336'},
            {'start': 337, 'end': 416, 'sheet': '337-416'},
            {'start': 417, 'end': 511, 'sheet': '417-511'}
        ]

    def dataPage(self, start=0, end=511, sheet=None, writer=None, min_split=None, max_split=None):
        """Build one sheet's DataFrame and return it.

        Keeps entries whose row lies in ``[start, end]``.  ``min_split``
        keeps measurements < min_split; ``max_split`` keeps measurements
        >= max_split.  If both ``writer`` and ``sheet`` are given, the frame
        is also written to that Excel sheet.
        """
        filtered = {
            key: item for key, item in self.data_dict.items()
            if start <= item['row'] <= end
        }

        df_new = pd.DataFrame({
            'Row': [item['row'] for item in filtered.values()],
            'Col': [item['col'] for item in filtered.values()],
            'Bit': [item['bit'] for item in filtered.values()]
        })

        # BUG FIX: compare against None so a threshold of 0 is honoured
        # (the old truthiness test ignored min_split=0 / max_split=0).
        if min_split is not None:
            keep = lambda v: v < min_split
        elif max_split is not None:
            keep = lambda v: v >= max_split
        else:
            keep = None

        for i, file in enumerate(self.files):
            file_name = Path(file).stem
            measures = [item['measure'][i] for item in filtered.values()]
            if keep is None:
                df_new[file_name] = measures
            else:
                # '' marks "not measured in this file" and is never filtered in.
                df_new[file_name] = [v if v != '' and keep(v) else '' for v in measures]

        if writer is not None and sheet is not None:
            df_new.to_excel(writer, index=False, sheet_name=sheet)
        return df_new

    def extractTXT(self, file_path):
        """Parse one file, appending its measurements to ``data_dict``.

        After the call every entry's measure list has exactly one slot per
        processed file, so dataPage can index lists by file position safely.
        """
        self.files.append(file_path)
        with open(file_path, "r") as file:
            for line in file:
                extract_data = self.reData.findall(line.strip())
                if not extract_data:
                    continue
                row, col, bit, measure = extract_data[0][:4]
                key = f'{row}_{col}_{bit}'
                measure_value = float(measure)
                entry = self.data_dict.get(key)
                if entry is None:
                    self.data_dict[key] = {
                        'row': int(row),
                        'col': int(col),
                        'bit': int(bit),
                        # '' placeholders for the earlier files this key missed.
                        'measure': [''] * self.index + [measure_value]
                    }
                else:
                    measures = entry['measure']
                    # BUG FIX: pad for files the key was absent from.  The old
                    # append-only code let lists fall out of step with
                    # self.files, so dataPage crashed with an IndexError.
                    while len(measures) < self.index:
                        measures.append('')
                    if len(measures) == self.index:
                        measures.append(measure_value)
                    else:
                        # Duplicate key within the same file: keep the latest.
                        measures[self.index] = measure_value
        self.index += 1
        # Pad keys that did not appear in this file at all.
        for entry in self.data_dict.values():
            measures = entry['measure']
            while len(measures) < self.index:
                measures.append('')

    def save_to_excel(self, writer):
        """Write every configured sheet to the supplied Excel writer."""
        for config in self.page_configs:
            self.dataPage(writer=writer, **config)
        # Progress notice printed in red ("Writing, please wait!").
        print("\033[31m正在写入，请稍等！")