import re
import os
import glob
import json5
import time
import pandas as pd
from loguru import logger
from tiny_logger import TinyLogger
from visualization_tool import VisualizationTool
from line_processor import LineProcessor


class SmapsAnalyzer(LineProcessor):
    """Parsing and analysis tool for Linux ``smaps`` files.

    Goals:

    * Parse memory-related data out of smaps dumps into tabular form
      (one :class:`pandas.DataFrame` per input file).

    * Provide data-analysis and visualization capabilities on top of it.
    """

    def __init__(self, cfg="config.json5"):
        """Initialize from a json5 configuration file.

        Args:
            cfg: Path to the json5 configuration file.
        """
        self.tl = TinyLogger("INFO")  # logging helper
        config = self.read_cfg(cfg)
        # Resolve the smaps file list from the configured inputs.
        self.smaps_files = self.get_files(config["input_files"], config["input_pattern"])
        self.focus_cols = config["focus_cols"]  # columns of interest; the ones cached during preprocessing
        # Create out/, out/cache, out/pngs
        self.out_dir = config["out_dir"]
        self.mk_out()
        self.steps = config["run"]
        self.name_rules = config["name_rules"]
        # Cache location
        cache_tag = config["cache_tag"]
        self.cache_dir = f"{self.out_dir}/cache/{cache_tag}"
        self.smaps_data = {}  # key -> DataFrame, intermediate data store
        # Tools
        self.vt = VisualizationTool()

    def mk_out(self):
        """Create the output directory tree (out/, out/cache, out/pngs).

        Uses ``makedirs(..., exist_ok=True)`` so a partially created tree is
        completed instead of being skipped: the previous implementation only
        created the sub-directories when ``out_dir`` itself was new, which
        broke later writes when ``out_dir`` existed without ``cache``/``pngs``.
        """
        os.makedirs(f"{self.out_dir}/cache", exist_ok=True)
        os.makedirs(f"{self.out_dir}/pngs", exist_ok=True)

    def get_tag(self):
        """Return a millisecond-resolution timestamp used as a unique tag."""
        return int(time.time() * 1000)

    def mk_cache(self):
        """Create a fresh cache directory named by a timestamp suffix.

        Side effect: updates ``self.cache_dir`` to the new directory.
        """
        suffix = self.get_tag()
        self.cache_dir = f"{self.out_dir}/cache/{suffix}"
        os.makedirs(self.cache_dir, exist_ok=True)

    def get_files(self, dir_or_files, pat="*.txt"):
        """Expand a mixed list of files and directories into a flat file list.

        Args:
            dir_or_files: List of file paths and/or directory paths.
            pat: Glob pattern applied inside each directory entry.

        Returns:
            Flat list of file paths. Entries that are neither an existing
            file nor a directory are silently skipped.
        """
        result = []
        for dir_or_file in dir_or_files:
            if os.path.isdir(dir_or_file):
                result.extend(glob.glob(f"{dir_or_file}/{pat}"))
            elif os.path.isfile(dir_or_file):
                result.append(dir_or_file)
            # else: silently skip non-existent entries
        return result

    def read_cfg(self, cfg: str):
        """Read a json5 configuration file.

        Returns:
            The parsed configuration dict, or ``None`` when the file is
            missing (the error is logged; callers crash on first access,
            which preserves the original best-effort behavior).
        """
        try:
            with open(cfg, 'r', encoding="utf-8") as jsonfile:
                return json5.load(jsonfile)
        except FileNotFoundError:
            logger.error(f"Could not find config file for {cfg}")
            return None

    def parse(self):
        """Parse every configured smaps file into ``self.smaps_data``."""
        for file in self.smaps_files:
            self.smaps_data[file] = self.parse_smaps(file)
        logger.info(f"SmapsAnalyzer parse success! get {len(self.smaps_data)} smaps data")

    def parse_smaps(self, smaps_file: str):
        """Parse a single smaps file.

        Returns:
            A DataFrame with one row per memory segment.
        """
        data = self.process(smaps_file)  # line-by-line parsing from LineProcessor
        return pd.DataFrame(data)

    def update_seg_head(self, seg: dict, head: dict) -> dict:
        """Merge the parsed header fields into the current segment dict."""
        seg.update(head)
        return seg

    def update_seg_body(self, seg: dict, part):
        """Store one parsed (key, value) statistic into the segment dict."""
        key, value = part
        seg[key] = value

    def line_to_val(self, line: str):
        """Parse one memory-segment statistics line into ``(key, value)``.

        Example lines::

            Name:           [anon:ArkTS Heap3035semi space]
            Size:                256 kB
            KernelPageSize:        4 kB
            MMUPageSize:           4 kB
            Rss:                  12 kB
            Pss:                  12 kB
            Shared_Clean:          0 kB
            Shared_Dirty:          0 kB
            Private_Clean:        12 kB
            Private_Dirty:         0 kB
            Referenced:           12 kB
            Anonymous:           256 kB
            LazyFree:              0 kB
            AnonHugePages:         0 kB
            ShmemPmdMapped:        0 kB
            Shared_Hugetlb:        0 kB
            Private_Hugetlb:       0 kB
            Swap:                  0 kB
            SwapPss:               0 kB
            Locked:                0 kB
            ProtectionKey:         0
            VmFlags: pr io co an   (value may be empty)

        Returns:
            ``(key, value)`` with a lower-cased key and a type-cast value,
            or ``None`` when the line does not match ``key: value``.
        """
        match = re.match(r'([_a-zA-Z]+):\s*(.*)', line)
        if not match:
            logger.error(f"Could not match the segment detail format: key: value, data: {line}")
            return None
        key, value = match.groups()
        key = key.lower().strip().replace(' ', '_')
        return key, self.cast_value(key, value.strip())

    # Classification of statistic value types by key.
    int_keys = [
        'size', 'kernelpagesize', 'mmupagesize', 'rss', 'pss', 'shared_clean', 'shared_dirty',
        'private_clean', 'private_dirty', 'referenced', 'anonymous', 'lazyfree', 'anonhugepages',
        'shmempmdmapped', 'shared_hugetlb', 'private_hugetlb', 'swap', 'swappss', 'locked', 'protectionkey'
    ]
    str_keys = ["name"]
    str_list_keys = ["vmflags"]

    def cast_value(self, key: str, value: str):
        """Cast a raw statistic string to its proper type based on the key.

        * name    -> str
        * sizes   -> int (the leading number of ``"<n> kB"`` or ``"<n>"``)
        * vmflags -> tuple[str, ...]

        Unknown keys are logged and returned unchanged as str.
        """
        if key in self.str_keys:
            return value
        if key in self.int_keys:
            # value is "xx kB" or "xx"
            return int(value.split()[0])
        if key in self.str_list_keys:
            # VmFlags: pr io co an
            return tuple(value.split())

        logger.error(f"Unknown value type for {key}")
        return value

    def header(self, line: str) -> dict:
        """Parse a memory-segment header line into its metadata fields.

        Example::

            2561600000-2561640000 rw-p 00000000 00:00 0    [anon:ArkTS Heap3035semi space]

        Returns:
            Dict with keys ``address``, ``permissions``, ``offset``,
            ``device``, ``inode``, ``pathname`` (``'[anon]'`` when empty),
            or ``None`` when the line is not a segment header.
        """
        match = re.match(r'([0-9A-Fa-f]+)-([0-9A-Fa-f]+) ([-rwxsp]+)\s+([0-9A-Fa-f]+) ([0-9A-Fa-f:]+) ([0-9]+)\s*(.*)', line)
        if match:
            addr_start, addr_end, perms, offset, dev, inode, path = match.groups()
            return {
                'address': f'{addr_start}-{addr_end}',
                'permissions': perms,
                'offset': offset,
                'device': dev,
                'inode': inode,
                'pathname': path.strip() or '[anon]'
            }
        # Non-header lines (statistics lines) are expected; stay quiet.
        return None

    def pie(self, data, axis, path):
        """Draw a pie chart via VisualizationTool.

        Args:
            data: Data to plot.
            axis: Column used for the pie slices.
            path: Output image path.
        """
        self.vt.update_cfg({
            "figure_size": (12, 12),
            "path": path,
            "title": "Pie Chart",
            "column": axis,
        })
        self.vt.pie(data)

    def line(self, data, path):
        """Draw a line chart via VisualizationTool.

        Args:
            data: Data to plot.
            path: Output image path.
        """
        self.vt.update_cfg({
            "figure_size": (12, 12),
            "path": path,
            "title": "Line Chart",
            "xlabel": "Time",
            "ylabel": "Mem(MB)",
            "interval": 6,
        })
        self.vt.line(data)

    def groupby_name(self, data, cols):
        """Group rows by pathname category and sum the requested columns.

        Categories come from ``self.name_rules`` (substring -> category);
        anything unmatched falls into "Other".
        """
        def group_by_pathname(name):
            for key, category in self.name_rules.items():
                if key in name:
                    return category
            return "Other"
        # Sum only the requested columns per category.
        return data.groupby(data["pathname"].apply(group_by_pathname)).sum()[cols]

    def to_mb(self, val, valid=2):
        """Convert kB to MB, rounded to ``valid`` decimal places (default 2)."""
        return round(val / 1024, valid)

    def analysis(self):
        """Analyze parsed data and plot total PSS/RSS trends over the files."""
        show_data = {
            # "x": [],
            "total_pss": [],
            "total_rss": [],
            # "pss_arkts_heap": [],
            "pss_app_lib": []
        }

        for _, data in self.smaps_data.items():
            grouped_data = self.groupby_name(data, ["rss", "pss", "swap", "swappss"])

            # show_data["x"].append(self.map_key(key))
            # Totals include the swapped-out portion: pss + swappss, rss + swap.
            show_data["total_pss"].append(self.to_mb(data["pss"].sum() + data["swappss"].sum()))
            show_data["total_rss"].append(self.to_mb(data["rss"].sum() + data["swap"].sum()))
            # show_data["pss_arkts_heap"].append(self.to_mb(grouped_data.at['ArkTS Heap', 'pss']))
            # Custom visualization fields can be added here.
            # BUGFIX: categories live in the groupby *index*, not the columns;
            # the old `in grouped_data` check tested column labels and never matched.
            if "App Lib" in grouped_data.index:
                show_data["pss_app_lib"].append(self.to_mb(grouped_data.at['App Lib', 'pss']))
        if not show_data["pss_app_lib"]:
            del show_data['pss_app_lib']
        # Build a DataFrame and plot it.
        df = pd.DataFrame(show_data)
        logger.info(f"{df.shape}")
        tag = self.get_tag()
        self.line(df, f"{self.out_dir}/pngs/pss_rss_plot_{tag}.png")
        logger.info(f"SmapsAnalyzer analysis success! write png {self.out_dir}/pngs/pss_rss_plot_{tag}.png")

    def cache(self):
        """Cache intermediate results (focus columns only) into ``self.cache_dir``."""
        self.mk_cache()
        if not self.smaps_data:
            self.parse()
        for key, data in self.smaps_data.items():
            data[self.focus_cols].to_csv(f"{self.cache_dir}/{os.path.basename(key)}.csv")
        logger.info(f"SmapsAnalyzer cache success! write into {self.cache_dir}")

    def load(self):
        """Load cached CSVs into ``self.smaps_data``, building the cache if absent."""
        if not os.path.isdir(self.cache_dir):
            logger.info(f"{self.cache_dir} does not exist")
            self.cache()

        for file in self.smaps_files:
            self.smaps_data[file] = pd.read_csv(f"{self.cache_dir}/{os.path.basename(file)}.csv")

        logger.info(f"SmapsAnalyzer load cache success! get {len(self.smaps_data)} smaps data")

    def run(self):
        """Execute the configured pipeline steps in order."""
        if "prepare" in self.steps:
            # Parse the file list -> self.smaps_data
            self.parse()
        if "cache" in self.steps:
            # Cache intermediate results
            self.cache()

        if "load" in self.steps:
            # Load cached results
            self.load()

        if "analysis" in self.steps:
            self.analysis()

        logger.info(f"SmapsAnalyzer run success!")

if __name__ == "__main__":
    # Entry point: build the analyzer from the default config and run its pipeline.
    analyzer = SmapsAnalyzer()
    analyzer.run()
