import re
from collections import defaultdict

def analyze_memory_log(input_file):
    """Parse a memory-allocation log and aggregate stats per allocation site.

    The log contains two sections, introduced by the marker lines
    "Allocated memory info(in block pool)" and
    "Allocated memory info(in byte pool)".  Each data line has the layout:
        <no> <size> <entity_id> <location>(Line <n>)

    Args:
        input_file: Path to the log file.  Several encodings are attempted
            (utf-8, gbk, utf-16, latin1) because the logs come from mixed
            toolchains.

    Returns:
        dict with keys 'block_pool' and 'byte_pool'; each value maps a key
        string "FileName,Entity_ID,Size,Line" to
        {'count': <occurrences>, 'total_size': <sum of sizes>}.
        If no encoding can decode the file, both sections are empty.
    """
    # Both sections share the same line layout; compile once instead of
    # re-compiling (and duplicating) the pattern for every line.
    line_pattern = re.compile(
        r'^\s*(\d+)\s+(\d+)\s+(\S+)\s+([^()]+)\(Line\s+(\d+)\)'
    )

    def _empty_stats():
        # Fresh accumulator per decoding attempt: a UnicodeDecodeError can
        # fire mid-file AFTER some lines were already counted, and reusing
        # the same dict would double-count them on the next encoding.
        return {
            'block_pool': defaultdict(lambda: {'count': 0, 'total_size': 0}),
            'byte_pool': defaultdict(lambda: {'count': 0, 'total_size': 0}),
        }

    encodings = ['utf-8', 'gbk', 'utf-16', 'latin1']
    for encoding in encodings:
        stats = _empty_stats()
        current_section = None
        try:
            with open(input_file, 'r', encoding=encoding) as f:
                for line in f:
                    line = line.strip()

                    # Section markers switch the active accumulator.
                    if "Allocated memory info(in block pool)" in line:
                        current_section = 'block_pool'
                        continue
                    if "Allocated memory info(in byte pool)" in line:
                        current_section = 'byte_pool'
                        continue

                    # Skip blank lines and anything before the first section.
                    if not current_section or not line:
                        continue

                    match = line_pattern.match(line)
                    if not match:
                        continue

                    # Group 1 (record number) is intentionally unused.
                    size = int(match.group(2))
                    entity_id = match.group(3).strip()
                    location = match.group(4).strip()
                    line_num = match.group(5)

                    # Location may carry extra comma-separated detail; only
                    # the leading file name goes into the aggregation key.
                    file_name = location.split(',')[0].strip()

                    # Aggregation key: FileName,Entity_ID,Size,Line
                    key = f"{file_name},{entity_id},{size},{line_num}"

                    entry = stats[current_section][key]
                    entry['count'] += 1
                    entry['total_size'] += size
            return stats  # decoded cleanly: these counts are authoritative
        except UnicodeDecodeError:
            continue  # try the next encoding

    # Every encoding failed: preserve the original contract of returning
    # empty (not raising), so callers still get a well-formed structure.
    return _empty_stats()

def save_statistics(stats, output_file):
    """Write the aggregated allocation statistics as a fixed-width report.

    Args:
        stats: Mapping of section name -> {key: {'count', 'total_size'}},
            where key is "FileName,Entity_ID,Size,Line" as produced by
            analyze_memory_log.
        output_file: Destination path; written as UTF-8 text.

    Each section is emitted with a header, a separator, and its rows sorted
    by descending total_size (key string as a deterministic tie-breaker).
    """
    # One fixed-width template for header and data rows keeps columns aligned.
    row_fmt = "{:<30} {:<15} {:<10} {:<10} {:<10} {:<10}\n"

    lines = []
    for section, entries in stats.items():
        lines.append(f"=== {section.upper()} ===\n")
        lines.append(row_fmt.format(
            "FileName", "Entity_ID", "Size", "Line", "Count", "TotalSize"
        ))
        lines.append("-" * 85 + "\n")

        # Largest aggregate first; the key breaks ties reproducibly.
        ordered = sorted(
            entries.items(),
            key=lambda kv: (-kv[1]['total_size'], kv[0]),
        )
        for key, agg in ordered:
            file_name, entity_id, size, line = key.split(',')
            # Truncate the two free-text columns so rows stay aligned.
            lines.append(row_fmt.format(
                file_name[:30],
                entity_id[:15],
                size,
                line,
                agg['count'],
                agg['total_size'],
            ))
        lines.append("\n")

    with open(output_file, 'w', encoding='utf-8') as out:
        out.writelines(lines)

if __name__ == "__main__":
    import glob
    import os

    # Process every .ass memory log in the current working directory,
    # writing each report next to its input with a .txt extension.
    log_files = glob.glob('*.ass')

    if not log_files:
        print("未找到任何.ass文件")

    for log_path in log_files:
        report_path = os.path.splitext(log_path)[0] + '.txt'
        save_statistics(analyze_memory_log(log_path), report_path)
        print(f"内存统计已完成，结果保存到: {report_path}")
