#!/usr/bin/env python
# -*- coding: utf-8 -*-

import csv
import os

# Directory ID -> directory path, e.g. {1001: "Assets/..."};
# filled by process_dirmap_file via read_uwascane_csv.
dirmap = {}
# Data-file index -> rule name, e.g. {1001: "SomeRule"};
# filled by process_rules_file via read_uwascane_csv.
rule_name_map = {}
# Data-file index -> list of raw result indices parsed from data.*.csv.
rule_data_map = {}

# Rule name -> {"chinese": ..., "priority": ..., "module": ...}, loaded from lang.csv.
rule_data_map_v2 = {}
# Rule name -> list of asset dicts, e.g. {"name": ...} or
# {"class": ..., "classpath": ...} (same role as rule_data_map, keyed by name).
rule_data_asset_list = {}

# Module name -> cached rows of the matching UwaScan/table.*.csv:
#   regular asset modules (mesh, texture, ...): [{"Name": ..., "DirID": ...}]
#   script module:                              [{"Class": ...}]
table_cache = {}

def process_table_cache():
    """Populate the global table_cache from the UwaScan/table.*.csv files.

    Regular asset tables are cached as [{"Name": ..., "DirID": ...}, ...];
    the script table is cached as [{"Class": ...}, ...].  A missing file
    prints a warning and leaves that module mapped to an empty list.
    """
    # Tables whose rows carry an asset name plus its directory ID.
    asset_tables = [
        "UwaScan/table.animationclip.csv",
        "UwaScan/table.animator.csv",
        "UwaScan/table.animatorcontroller.csv",
        "UwaScan/table.audioclip.csv",
        "UwaScan/table.material.csv",
        "UwaScan/table.mesh.csv",
        "UwaScan/table.particlesystem.csv",
        "UwaScan/table.skinnedmeshrenderer.csv",
        "UwaScan/table.texture.csv",
        "UwaScan/table.uiimage.csv",
        "UwaScan/table.uirawimage.csv",
        "UwaScan/table.uitext.csv",
        "UwaScan/table.videoclip.csv",
    ]
    _load_tables(asset_tables, lambda row: {"Name": row[0], "DirID": row[1]})

    # The script table carries only a class name per row.
    script_tables = ["UwaScan/table.script.csv"]
    _load_tables(script_tables, lambda row: {"Class": row[0]})


def _load_tables(paths, row_to_record):
    """Load each CSV in *paths* into the global table_cache, keyed by module.

    The module name is the second-to-last dot-separated component of the
    path (e.g. "UwaScan/table.mesh.csv" -> "mesh").  The first row of each
    file is treated as a header and skipped.  A missing file prints a
    warning and leaves that module mapped to an empty list.

    Args:
        paths (list): CSV file paths to load.
        row_to_record (callable): maps one CSV row (list of str) to the
            dict cached for that row.
    """
    for path in paths:
        module = path.split('.')[-2]
        table_cache[module] = []
        try:
            with open(path, 'r', encoding='utf-8') as f:
                reader = csv.reader(f)
                next(reader)  # skip the header row
                table_cache[module] = [row_to_record(row) for row in reader]
        except FileNotFoundError:
            print(f"警告：找不到文件 {path}，已跳过")

def process_lang_file():
    """Load rule metadata from lang.csv into the global rule maps.

    Each data row of lang.csv is (rule name, Chinese description,
    priority, module).  The rule name becomes a key of rule_data_map_v2
    (mapping to its description/priority/module) and also gets an empty
    asset list in rule_data_asset_list.

    Globals written:
        rule_data_map_v2 (dict): rule name -> {"chinese", "priority", "module"}.
        rule_data_asset_list (dict): rule name -> [] (filled later).

    Raises:
        FileNotFoundError: if lang.csv is absent — unlike the optional
            UwaScan tables this is treated as fatal.
    """
    global rule_data_map_v2
    global rule_data_asset_list
    with open("lang.csv", 'r', encoding='utf-8') as f:
        csv_reader = csv.reader(f)
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            rule_data_map_v2[row[0]] = {"chinese": row[1], "priority": row[2], "module": row[3]}
            rule_data_asset_list[row[0]] = []


def process_ext_data_file(file_path):
    """Parse an ext.data.*.csv file into class-index/class-path records.

    The first two rows are treated as headers and skipped; every
    remaining row becomes one record.

    Args:
        file_path (str): path of the extension data CSV file.

    Returns:
        list: one dict per data row with the keys:
            - classindex (int): index of the class in the script table
            - classpath (str): asset path associated with the class
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        csv_reader = csv.reader(f)
        next(csv_reader)  # skip first header row
        next(csv_reader)  # skip second header row
        return [{"classindex": int(row[0]), "classpath": row[1]} for row in csv_reader]

def process_data_file(file_path):
    """Read a data.*.csv rule-result file into a list of result indices.

    Each line holds a single integer in its first column; there is no
    header row.  Blank lines are skipped (the previous version crashed
    on them).

    Args:
        file_path (str): path of the data CSV file.

    Returns:
        list: the integers from the first column, in file order.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        return [int(row[0]) for row in csv.reader(f) if row]

def process_rules_file(file_path):
    """Build the data-index -> rule-name mapping from a rules CSV file.

    Each data row carries the rule name in column 1 and its data-file
    index in column 5.  Only rules already present in the global
    rule_data_map_v2 (i.e. listed in lang.csv) are kept; unknown rules
    print a warning and are skipped.

    Args:
        file_path (str): path of the rules CSV file.

    Returns:
        dict: data index (int) -> rule name (str); empty dict on any error.
    """
    global rule_data_map_v2
    rules_data = {}
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            csv_reader = csv.reader(f)
            next(csv_reader)  # skip the header row
            for row in csv_reader:
                rulename = row[0]
                dataindex = int(row[4])
                if rulename in rule_data_map_v2:
                    rules_data[dataindex] = rulename
                else:
                    print(f"警告：规则 {rulename} 不在规则数据映射中，跳过")
        return rules_data
    except Exception as e:
        print(f"处理rules文件时出错: {str(e)}")
        return {}

def process_dirmap_file(file_path):
    """Parse a dirmap CSV file into a directory-ID -> path mapping.

    Each line is "<numeric id>,<path>"; there is no header row.

    Args:
        file_path (str): path of the dirmap file.

    Returns:
        dict: directory ID (int) -> directory path (str); empty dict on
            any error (the error is printed).
    """
    mapping = {}
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                fields = line.strip().split(',')
                mapping[int(fields[0])] = fields[1]
        return mapping
    except Exception as e:
        print(f"处理dirmap文件时出错: {str(e)}")
        return {}



def read_uwascane_csv():
    """Read and process every CSV file under the UwaScan directory.

    Steps:
    1. Parse dirmap.csv into the directory-ID -> path mapping.
    2. Parse rules.csv into the data-index -> rule-name mapping.
    3. Parse every data.*.csv file into raw rule-check result indices.
    4. Resolve each result index against the cached asset tables and
       collect the offending assets per rule.

    Globals written:
        dirmap (dict): directory ID -> path.
        rule_name_map (dict): data index -> rule name.
        rule_data_map (dict): data index -> list of result indices.
        rule_data_asset_list (dict): rule name -> list of asset dicts.
    """
    csv_folder = os.path.join(os.path.dirname(__file__), 'UwaScan')

    global dirmap
    global rule_name_map
    global rule_data_map
    global rule_data_asset_list

    file_path = os.path.join(csv_folder, "dirmap.csv")
    dirmap = process_dirmap_file(file_path)

    file_path1 = os.path.join(csv_folder, "rules.csv")
    rule_name_map = process_rules_file(file_path1)

    for file in os.listdir(csv_folder):
        if file.endswith('.csv'):
            file_path = os.path.join(csv_folder, file)
            if file.startswith("data"):
                # File names look like "data.<index>.csv"; the index ties
                # the file to a rule via rule_name_map.
                dataindex = int(file.split('.')[-2])

                if dataindex not in rule_name_map:
                    print(f"警告：数据索引 {dataindex} 不在规则名称映射中，跳过")
                    continue

                rule_name = rule_name_map[dataindex]
                module = rule_data_map_v2[rule_name]["module"]
                rule_data_map[dataindex] = process_data_file(file_path)

                if dataindex < 3000:
                    # Indices below 3000 refer to rows of a regular asset
                    # table ({"Name", "DirID"} records).
                    # NOTE(review): the loop variable shadows the outer
                    # `dataindex`; harmless because the outer value is not
                    # used again in this branch, but worth renaming.
                    for dataindex in rule_data_map[dataindex]:
                        assetinfo = table_cache[module][dataindex]
                        rule_data_asset_list[rule_name].append({"name":assetinfo["Name"]})
                else:
                    # Indices >= 3000 refer to the script table and have a
                    # companion ext.data.*.csv with records shaped
                    # [{"classindex": 0, "classpath": ""}, ...].
                    extdata = None
                    if len(rule_data_map[dataindex]) > 0:
                        # Only load the ext file when there are results to
                        # resolve; derives "ext.data.N.csv" from "data.N.csv".
                        ext_file_path = file_path.replace("data.", "ext.data.")
                        extdata = process_ext_data_file(ext_file_path)
                    for classindex in rule_data_map[dataindex]:
                        # NOTE(review): this stores the whole cached record
                        # dict (e.g. {"Class": ...}); an earlier commented-out
                        # variant used table_cache[module][classindex]["Class"]
                        # — confirm which shape downstream consumers expect.
                        Class = table_cache[module][classindex]
                        # Match the ext record by class index; assumes exactly
                        # one match exists (IndexError if none).
                        matched_data = list(filter(lambda x: x['classindex'] == classindex, extdata))[0]
                        rule_data_asset_list[rule_name].append({"class":Class,"classpath":matched_data["classpath"]})

def generate_csv(name, header, table):
    """Write *header* followed by the rows of *table* to the CSV file *name*.

    Args:
        name (str): output CSV file name.
        header (list): column titles, written as the first row.
        table (list): data rows, one list per row.
    """
    with open(name, 'w', newline='', encoding='utf-8') as out:
        writer = csv.writer(out)
        writer.writerow(header)
        writer.writerows(table)

def gen_rule_list():
    """Generate the per-rule summary report (output.csv).

    For every known rule, counts how many entries failed its check and
    combines that with the rule's Chinese description and priority.
    NOTE(review): the "rulename" column is filled with the Chinese
    description, not the raw rule name — confirm that is intended.
    """
    header = ["state", "rulename", "count", "priority"]
    table = []
    for dataindex, rulename in rule_name_map.items():
        failcount = len(rule_data_map[dataindex])
        meta = rule_data_map_v2[rulename]
        state = "失败" if failcount > 0 else "成功"
        table.append([state, meta["chinese"], failcount, meta["priority"]])

    generate_csv('output.csv', header, table)

def gen_asset_list():
    """Write the per-rule asset list as a Markdown report.

    Each rule's Chinese description becomes a level-2 heading, followed
    by one bullet per associated asset dict.  Errors are printed rather
    than raised.
    """
    global rule_data_asset_list
    output_path = os.path.join(os.path.dirname(__file__), 'rule_data_asset_list.md')
    try:
        with open(output_path, 'w', encoding='utf-8') as report:
            for rulename, assets in rule_data_asset_list.items():
                chinese = rule_data_map_v2[rulename]["chinese"]
                report.write(f"## {chinese}\n")
                for asset in assets:
                    report.write(f"- {asset}\n")
                report.write("\n")
    except Exception as e:
        print(f"写入文件时出错: {str(e)}")

def main():
    """Program entry point — run the full report pipeline in order.

    1. Cache the asset tables from UwaScan/table.*.csv.
    2. Load rule metadata from lang.csv.
    3. Read and resolve the UwaScan data CSV files.
    4. Emit the rule summary report (output.csv).
    5. Emit the asset list report (rule_data_asset_list.md).
    """
    process_table_cache()
    process_lang_file()
    read_uwascane_csv()
    gen_rule_list()
    gen_asset_list()

# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()