import csv
import os
import cantools
import sys
import json
import re

def read_csv_data_from_file(file_path):
    """Read CSV rows from a single .csv file or from every .csv under a directory.

    The first (header) row of each file is skipped. Returns a list of rows,
    each row being a list of cell strings.

    Raises:
        ValueError: if *file_path* is neither a .csv file nor a directory.
    """
    def _load_one(path):
        # Drop the header line, keep everything after it.
        with open(path, 'r') as fh:
            reader = csv.reader(fh)
            next(reader)
            return list(reader)

    def _load_dir(dir_path):
        rows = []
        for root, _, names in os.walk(dir_path):
            for name in names:
                if name.endswith('.csv'):
                    rows += _load_one(os.path.join(root, name))
        return rows

    if os.path.isfile(file_path) and file_path.endswith('.csv'):
        return _load_one(file_path)
    if os.path.isdir(file_path):
        return _load_dir(file_path)
    raise ValueError(f"'{file_path}' is neither a valid CSV file nor a directory.")

def read_dbc_data_from_dir(dbc_file_dir):
    """Recursively load every .dbc file found under *dbc_file_dir*.

    Returns a list of (dbc_file_path, cantools database) tuples. Files that
    fail to parse are reported on stdout and skipped.
    """
    dbc_paths = [
        os.path.join(root, name)
        for root, _, names in os.walk(dbc_file_dir)
        for name in names
        if name.endswith(".dbc")
    ]

    loaded = []
    for path in dbc_paths:
        try:
            loaded.append((path, cantools.database.load_file(path)))
        except Exception as exc:
            # Best-effort: a broken DBC should not abort the whole run.
            print(f"Failed to load DBC file {path}: {exc}")
    return loaded

def read_multi_db_message_from_db(dbc_data):
    """Find message names that are defined in more than one DBC file.

    Args:
        dbc_data: list of (dbc_file_path, cantools database) tuples.

    Returns:
        dict mapping each duplicated message name to the set of DBC file
        paths that define it (names defined in only one file are omitted).
    """
    message_sources = {}
    for dbc_file, db in dbc_data:
        for message in sorted(db.messages, key=lambda msg: msg.frame_id):
            message_sources.setdefault(message.name, set()).add(dbc_file)

    # Fix: the original created a throwaway {} per entry that was immediately
    # overwritten, and ended with a stray semicolon; a comprehension expresses
    # the "appears in >1 file" filter directly.
    return {name: files for name, files in message_sources.items() if len(files) > 1}


def custom_sort_key(message_name):
    """Sort key that splits a leading alphabetic run and the digits after it.

    "CAN12" -> ("CAN", 12) so "CAN2" sorts before "CAN10". Names without a
    letters-then-digits prefix sort by their literal text and are pushed
    behind matching names via ("name", inf).
    """
    parts = re.match(r'([A-Za-z]+)(\d+)', message_name)
    if parts is None:
        # No letters+digits prefix found: place it at the end.
        return (message_name, float('inf'))
    prefix, digits = parts.groups()
    return (prefix, int(digits))

def extract_unique_messages_and_signals(csv_data):
    """Build {dbc: {message: [signals]}} from CSV rows, tracking duplicates.

    Row layout is [dbc, message, signal, ...]. An empty message cell means
    "same message as the previous row"; rows with an empty signal cell are
    skipped. Returns (unique mapping, duplicate mapping); in both, dbc and
    message keys are ordered by custom_sort_key and signal sets become
    sorted lists.
    """
    uniques = {}
    duplicates = {}

    current_message = ""
    for row in csv_data:
        dbc_name = row[0]
        if row[1]:
            # A non-empty cell starts a new message; blank cells inherit it.
            current_message = row[1]
        signal_name = row[2]
        if not signal_name:
            continue

        bucket = uniques.setdefault(dbc_name, {}).setdefault(current_message, set())
        if signal_name in bucket:
            # Already seen for this dbc/message: record it as a duplicate.
            duplicates.setdefault(dbc_name, {}).setdefault(current_message, set()).add(signal_name)
        else:
            bucket.add(signal_name)

    def _sorted_view(tree):
        # Order dbc and message names with custom_sort_key; signals A-Z.
        return {
            dbc: {
                msg: sorted(signals)
                for msg, signals in sorted(msgs.items(), key=lambda kv: custom_sort_key(kv[0]))
            }
            for dbc, msgs in sorted(tree.items(), key=lambda kv: custom_sort_key(kv[0]))
        }

    return _sorted_view(uniques), _sorted_view(duplicates)


def generate_output_file(sorted_cvs_messages, filename):
    """Write the pass-through mapping template to *filename*.

    The first line is a quoted comma-joined list of "dbc.message" parameter
    names followed by `","`; the remainder is a C++ yyjson snippet in which
    each signal references its message's parameter as a 1-based `$N`
    placeholder and the serialized JSON is assigned to `$0`.

    Args:
        sorted_cvs_messages: {dbc: {message: [signal, ...]}}.
        filename: output path, overwritten if present.
    """
    # Collect the "dbc.message" parameters in iteration order; (dbc, message)
    # pairs are unique, so a dict gives the same index `.index()` would —
    # but in O(1) per lookup instead of O(n).
    params_message_signal = []
    for dbc, messages in sorted_cvs_messages.items():
        for message in messages:
            params_message_signal.append(f"{dbc}.{message}")
    param_index = {p: i for i, p in enumerate(params_message_signal)}

    first_line = '"' + ",".join(params_message_signal) + '","'

    header = """
yyjson_mut_doc *yyjson_doc = yyjson_mut_doc_new(NULL);
if (yyjson_doc) {
    yyjson_mut_val *yyjson_root = yyjson_mut_obj(yyjson_doc);
"""
    body_lines = []
    for dbc, messages in sorted_cvs_messages.items():
        for message, signals in messages.items():
            # The placeholder index is per-message; hoist it out of the
            # signal loop instead of recomputing it for every signal.
            placeholder = param_index[f"{dbc}.{message}"] + 1
            for signal in signals:
                body_lines.append(
                    f'    pivot::chassis_utils::GetTwoJsonObject(yyjson_doc, yyjson_root, ""{message}"", ""{signal}"", ${placeholder}.{signal});\n'
                )

    footer = """
    yyjson_mut_doc_set_root(yyjson_doc, yyjson_root);
    size_t json_length = 0;
    const char *json_str = yyjson_mut_write(yyjson_doc, YYJSON_WRITE_NOFLAG, &json_length);
    if (json_str) {
      $0 = json_str;
      free((void *)json_str);
    }
    yyjson_mut_doc_free(yyjson_doc);
}
"
"""
    code_block = header + "".join(body_lines) + footer
    with open(filename, 'w') as file:
        file.write(first_line + '\n')
        file.write(code_block.strip())


def generate_ssp_mapping_file(sorted_cvs_messages, filename, param_path):
    """Write the SSP signal-mapping CSV: one row per dbc/message/signal.

    Each row maps "dbc.message.signal" to "param_path.dbc.message.signal"
    with the literal mapping expression "$0=$1" and module type "vehicle".
    """
    with open(filename, 'w', newline='', encoding='utf-8') as out:
        writer = csv.writer(out)
        writer.writerow(["msf_input", "msf_output", "signal_mapping_custom", "module_type"])

        for dbc_name, messages in sorted_cvs_messages.items():
            for message_name, signals in messages.items():
                for signal_name in signals:
                    qualified = f"{dbc_name}.{message_name}.{signal_name}"
                    writer.writerow([
                        qualified,
                        f"{param_path}.{qualified}",
                        "$0=$1",
                        "vehicle",
                    ])

def generate_dgw_file(filter_messages, filename, dbc_data, dbc_match):
    """Write the DGW routing CSV, packing messages into SOME/IP channels.

    Only messages present in *filter_messages* for a dbc name whose matched
    file fragment appears in the DBC path are emitted. A channel starts at
    the 22-byte header; each message adds a 15-byte msf header plus its
    payload, and a new channel is opened when a message would push the
    channel past 1300 bytes.
    """
    # Header is 20 bytes + can-num (2); the per-message msf header is
    # 15 bytes for both msf v1.0 and v1.1.
    base_header_len = 20 + 2
    per_msg_header_len = 15
    channel_limit = 1300

    channel = 0
    channel_len = base_header_len
    total_len = 0
    with open(filename, 'w', newline='', encoding='utf-8') as out:
        writer = csv.writer(out)
        writer.writerow(["dbc", "dbc_file", "message_name", "frame_id", "message_length", "message_length_total", "msf_message_header_len", "someip_channel_len", "someip_channel"])
        for dbc_file, db in dbc_data:
            for match_name, match_file in dbc_match.items():
                if match_file not in dbc_file:
                    continue
                # Invariant over the message loop: skip dbc names with no
                # filtered messages at all.
                if match_name not in filter_messages:
                    continue
                wanted = filter_messages[match_name]
                for message in sorted(db.messages, key=lambda m: m.frame_id):
                    if message.name not in wanted:
                        continue
                    total_len += message.length
                    if channel_len + per_msg_header_len + message.length > channel_limit:
                        # Would overflow this channel: open the next one.
                        channel_len = base_header_len
                        channel += 1
                    channel_len += per_msg_header_len + message.length
                    writer.writerow([match_name, match_file, message.name, message.frame_id, message.length, total_len, per_msg_header_len, channel_len, channel])

def filter_messages_and_signals(sorted_cvs_messages, dbc_data, dbc_match, multi_db_message):
    """Partition the CSV-declared signals by whether they exist in the loaded DBCs.

    Args:
        sorted_cvs_messages: {dbc_name: {message_name: [signal_name, ...]}}
            parsed from the input CSV.
        dbc_data: list of (dbc_file_path, cantools database) tuples.
        dbc_match: {dbc_name: dbc_file_name_fragment}; when non-empty, a
            dbc_name is only searched in files whose path contains its fragment.
        multi_db_message: message names defined in more than one DBC file;
            such names are never re-homed onto another dbc_name.

    Returns:
        Tuple (sorted_filter_messages, no_exist_messages, fix_messages):
        signals that were found (possibly re-homed), sorted; signals found in
        no DBC at all; and signals found under a different dbc_name than the
        CSV claimed.
    """
    filter_messages = {}
    no_exist_messages = {}
    fix_messages = {}

    for dbc_name, messages in sorted_cvs_messages.items():
        for message_name, signals in messages.items():
            for signal_name in signals:
                exists_in_dbc = False
                fix_in_dbc = False
                # Rewritten below if the signal turns out to live in a DBC
                # mapped to a different dbc_name.
                fix_dbc_name = dbc_name
                for dbc_file, db in dbc_data:
                    if dbc_match:
                        # Restrict the search to the file mapped to this dbc_name.
                        if fix_dbc_name not in dbc_match:
                            continue
                        if dbc_match[fix_dbc_name] not in dbc_file:
                            continue
                    if any(message.name == message_name and signal.name == signal_name for message in db.messages for signal in message.signals):
                        exists_in_dbc = True
                        break

                # Not found under the declared dbc_name: search every DBC,
                # but only for message names unique to a single DBC file.
                if not exists_in_dbc and message_name not in multi_db_message:
                    for dbc_file, db in dbc_data:
                        if any(message.name == message_name and signal.name == signal_name for message in db.messages for signal in message.signals):
                            # Re-home onto the dbc_name whose matched file
                            # fragment appears in this DBC's path.
                            for match_dbc_name, match_dbc_file in dbc_match.items():
                                if match_dbc_file in dbc_file:
                                    fix_dbc_name = match_dbc_name
                                    break
                            fix_in_dbc = True
                            break

                    if fix_in_dbc:
                        # Record the signal as "fixed" under its new dbc_name.
                        if fix_dbc_name not in fix_messages:
                            fix_messages[fix_dbc_name] = {}
                        if message_name not in fix_messages[fix_dbc_name]:
                            fix_messages[fix_dbc_name][message_name] = set()
                        if signal_name not in fix_messages[fix_dbc_name][message_name]:
                            fix_messages[fix_dbc_name][message_name].add(signal_name)
                    else:
                        # The signal exists in no DBC at all.
                        if fix_dbc_name not in no_exist_messages:
                            no_exist_messages[fix_dbc_name] = {}
                        if message_name not in no_exist_messages[fix_dbc_name]:
                            no_exist_messages[fix_dbc_name][message_name] = set()
                        if signal_name not in no_exist_messages[fix_dbc_name][message_name]:
                            no_exist_messages[fix_dbc_name][message_name].add(signal_name)

                if exists_in_dbc or fix_in_dbc:
                    # Keep every signal that was located somewhere.
                    if fix_dbc_name not in filter_messages:
                        filter_messages[fix_dbc_name] = {}
                    if message_name not in filter_messages[fix_dbc_name]:
                        filter_messages[fix_dbc_name][message_name] = set()
                    if signal_name not in filter_messages[fix_dbc_name][message_name]:
                        filter_messages[fix_dbc_name][message_name].add(signal_name)
    # Order dbc and message names with custom_sort_key; signals A-Z.
    sorted_filter_messages = {
        dbc: {msg: sorted(signals) for msg, signals in sorted(msgs.items(), key=lambda item: custom_sort_key(item[0]))}
        for dbc, msgs in sorted(filter_messages.items(), key=lambda item: custom_sort_key(item[0]))
    }
    return sorted_filter_messages, no_exist_messages, fix_messages

if __name__ == "__main__":
    # Default paths; every one of them can be overridden by a JSON config
    # file passed as argv[1].
    input_csv_data = "./signals.csv"
    input_dbc_file_dir = "../../../"
    output_mapping = "./pass_through_mapping.txt"
    output_ssp = "./pass_through_ssp.csv"
    output_param_path = "/vehicle/chassis_report.extra.json"
    output_dgw = "./pass_through_dgw.csv"
    dbc_match = {}
    if len(sys.argv) == 1: # code debug
        dbc_match = {
            "CAN_ADAS": "ADC_ADASLampLocal_ADC_0000-a.dbc",
            "CAN_G2M": "ADC_GLOBAL2M_ADC_0001-a.dbc",
            "CAN_G5M": "ADC_GLOBAL5M_ADC_0001-a.dbc",
            "CAN_CPAP": "CPAP.dbc"
        }
    elif len(sys.argv) == 2:
        try:
            with open(sys.argv[1], 'r') as f:
                params = json.load(f)
                input_csv_data = params.get("input_csv_data", input_csv_data)
                input_dbc_file_dir = params.get("input_dbc_file_dir", input_dbc_file_dir)
                output_mapping = params.get("output_mapping", output_mapping)
                output_ssp = params.get("output_ssp", output_ssp)
                output_param_path = params.get("output_param_path", output_param_path)
                # Fix: output_dgw was the only path not configurable via JSON.
                output_dgw = params.get("output_dgw", output_dgw)
                dbc_match = params.get("dbc_match", dbc_match)
        except Exception as e:
            print(f"Error reading JSON file: {e}")
            sys.exit(1)  # sys.exit instead of the interactive-only exit()
    else:
        print(f"invalid argv:{sys.argv}")
        sys.exit(1)


    # Extract unique messages/signals from the CSV and sort them.
    csv_data = read_csv_data_from_file(input_csv_data)
    sorted_cvs_messages, duplicate_cvs_messages = extract_unique_messages_and_signals(csv_data)
    dbc_data = read_dbc_data_from_dir(input_dbc_file_dir)
    multi_db_message = read_multi_db_message_from_db(dbc_data)
    # Drop signals that do not exist in any of the DBC files.
    filter_messages, no_exist_signals, fix_messages = filter_messages_and_signals(sorted_cvs_messages, dbc_data, dbc_match, multi_db_message)

    # Generate the output files.
    generate_output_file(filter_messages, output_mapping)
    generate_ssp_mapping_file(filter_messages, output_ssp, output_param_path)
    generate_dgw_file(filter_messages, output_dgw, dbc_data, dbc_match)

    # Report message names duplicated across DBC files.
    if multi_db_message:
        print("========================dbc 文件中重复信号如下========================")
        print(multi_db_message)
    # Optional diagnostics, disabled by default:
    # if fix_messages:
    #     print("========================dbc 文件中修复信号如下========================")
    #     print(fix_messages)
    # if duplicate_cvs_messages:
    #     print("========================cvs 文件中重复的信号如下========================")
    #     print(duplicate_cvs_messages)
    # if no_exist_signals:
    #     print("========================dbc 文件中无法找到信号如下========================")
    #     print(no_exist_signals)
