import os
import re
import sys

import pandas as pd


class ParserMerReqConsuming:
    """Parse a merchant-request log, correlate entries by requestId, export to Excel.

    Three kinds of log lines are matched and keyed by their requestId:
      1. the merchant request line (carries the AK and the requested URL),
      2. the upstream response line (response body + upstream cost in ms),
      3. the platform receive-cost line.
    Records whose requestId appears in all three maps are merged and written
    to an .xlsx file next to this script.
    """

    # Name of the exported spreadsheet (written to this script's directory).
    OUTPUT_FILENAME = 'mer_consuming.xlsx'

    # Rows per sheet when exporting; keeps each sheet small and well under
    # Excel's per-sheet row limit.
    ROWS_PER_SHEET = 10000

    # Compiled once at class level instead of rebuilding the pattern string
    # on every log line. requestId may contain letters and digits, hence \w.
    _RE_CONSUMING = re.compile(
        r"requestId:\[(?P<request_id>\w+)\].*?response: (?P<response>\{.*?\}),\s*请求耗时: (?P<cost_time>\d+)ms")
    _RE_INTERFACE_CONSUMING = re.compile(
        r"requestId:\[(?P<request_id>\w+)\].*?平台接收请求耗时: (?P<cost_time>\d+) ms")

    def __init__(self, filepath):
        # Path of the log file to parse.
        self.filepath = filepath
        # requestId -> requested merchant interface URL
        self.interface_map = {}
        # requestId -> (upstream response body, upstream request cost in ms)
        self.res_map = {}
        # requestId -> platform receive-request cost in ms
        self.interface_consuming_map = {}

    def parse_log(self, ak_value):
        """Scan the log in a single pass, populating all three maps.

        The original implementation read the whole file three times, once per
        map; the three line handlers are independent, so one pass over the
        file produces identical results.

        :param ak_value: merchant AK whose request lines should be extracted.
        """
        with open(self.filepath, 'r', encoding='utf-8') as f:
            for line in f:
                self._parse_line_get_rid(line, ak_value)
                self._parse_line_get_consuming(line)
                self._parse_line_get_interface_consuming(line)

    def _parse_line_get_rid(self, line, ak_value):
        """Record requestId -> URL for request lines carrying *ak_value*."""
        # The pattern depends on the caller-supplied AK, so it is built here;
        # the `re` module caches compiled patterns, keeping repeat calls cheap.
        pattern_interface = (r"requestId:\[(?P<request_id>\w+)\].*?ak: "
                             + re.escape(ak_value)
                             + r",\s*请求地址: (?P<url>\S+)")
        match = re.search(pattern_interface, line)
        if match:
            self.interface_map[match.group('request_id')] = match.group('url')

    def _parse_line_get_consuming(self, line):
        """Record requestId -> (response body, upstream cost) for response lines."""
        match = self._RE_CONSUMING.search(line)
        if match:
            self.res_map[match.group('request_id')] = (
                match.group('response'), match.group('cost_time'))

    def _parse_line_get_interface_consuming(self, line):
        """Record requestId -> platform receive cost for receive-cost lines."""
        match = self._RE_INTERFACE_CONSUMING.search(line)
        if match:
            self.interface_consuming_map[match.group('request_id')] = (
                match.group('cost_time'))

    def merge_and_export(self):
        """Merge the three maps on requestId and write the result to Excel.

        Only requestIds present in all three maps produce a row. Rows are
        split across sheets of ROWS_PER_SHEET each. Prints the output path,
        or a notice when nothing matched.
        """
        records = []
        for rid, url in self.interface_map.items():
            if rid in self.res_map and rid in self.interface_consuming_map:
                res, cost = self.res_map[rid]
                records.append({
                    'requestId': rid,
                    '请求接口': url,
                    '上游响应报文': res,
                    '请求上游耗时': cost,
                    '接口耗时': self.interface_consuming_map[rid],
                })

        if not records:
            print("没有找到匹配的日志记录。")
            return

        df = pd.DataFrame(records)

        output_path = os.path.join(os.path.dirname(__file__), self.OUTPUT_FILENAME)

        # Split into sheets of ROWS_PER_SHEET rows each.
        with pd.ExcelWriter(output_path, engine='openpyxl') as writer:
            for start in range(0, len(df), self.ROWS_PER_SHEET):
                sheet_df = df.iloc[start:start + self.ROWS_PER_SHEET]
                sheet_name = f'Sheet_{start // self.ROWS_PER_SHEET + 1}'
                sheet_df.to_excel(writer, sheet_name=sheet_name, index=False)

        print(f"导出完成，文件保存在: {output_path}")


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("用法: python script.py <日志文件路径>")
        sys.exit(1)
    filepath = sys.argv[1]

    if not os.path.isfile(filepath):
        print(f"文件不存在: {filepath}")
        sys.exit(1)

    ak_value = sys.argv[2]

    if ak_value is None:
        print("请输入AK值")
        sys.exit(1)

    '''
    跟据ak获取指定日志的调用情况，输出到excel表格
    '''
    extractor = ParserMerReqConsuming(filepath)
    extractor.parse_log(ak_value)
    extractor.merge_and_export()
