import json
import csv
from logging import Logger
from pathlib import Path
from typing import Any
from urllib.parse import urlparse


class NetworkLogParser:
    """Parses network-monitor sample logs and aggregates per-API timing data.

    Expects ``network_log_path`` to contain one subfolder per sample, each
    holding a ``sample.log`` file of JSON lines with ``method``, ``url`` and
    ``timing.total`` fields.
    """

    def __init__(self, network_log_path: Path, logger: Logger):
        # Root directory whose subfolders each contain a sample.log file.
        self.network_log_path = network_log_path
        self.logger = logger


    @staticmethod
    def _parse_log_line(line: str) -> dict[str, Any]:
        """Parse one JSON log line into ``{'method', 'url', 'timing'}``.

        Returns an empty dict for lines that are not valid JSON or that are
        missing any of the expected keys, so callers can simply skip them.
        """
        try:
            # Key access must stay inside the try: a syntactically valid JSON
            # line with a different structure raises KeyError/TypeError and
            # should be skipped, not crash the whole file.
            line_dict = json.loads(line)
            return {
                'method': line_dict['method'],
                'url': line_dict['url'],
                'timing': line_dict['timing']['total'],
            }
        except (json.JSONDecodeError, KeyError, TypeError):
            return {}


    @staticmethod
    def _match_url_to_entrypoint(url: str, endpoint: str) -> bool:
        """Return True if the URL's path matches the endpoint template.

        Template segments wrapped in braces (e.g. ``/users/{id}``) are path
        parameters and match any single segment.  Only the trailing segments
        of the URL are compared, so ``/api/v1/users/5`` matches
        ``/users/{id}``.
        """
        url_parts = urlparse(url).path.strip('/').split('/')
        endpoint_parts = endpoint.strip('/').split('/')
        # The URL needs at least as many segments as the template; otherwise
        # zip() would silently truncate and allow false matches (e.g. 'a/b'
        # against '{x}/{y}/{z}').
        if len(url_parts) < len(endpoint_parts):
            return False
        # Compare only the trailing segments of the URL path.
        url_parts = url_parts[-len(endpoint_parts):]

        for url_part, endpoint_part in zip(url_parts, endpoint_parts):
            # Brace-wrapped segments are path parameters: match anything.
            if endpoint_part.startswith('{') and endpoint_part.endswith('}'):
                continue
            if url_part != endpoint_part:
                return False
        return True


    def _process_log_file(self, log_file: Path, api_list: list[Any]) -> list[list[Any]]:
        """Process a single log file.

        Groups request timings by the API they match (path + method) and
        returns one row per group:
        ``[path, method, average_timing, *individual_timings]``.
        """
        data: dict[str, dict[str, list]] = {}
        with open(log_file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                # Skip blank lines and '=' separator lines.
                if not line or line.startswith('='):
                    continue
                parsed = self._parse_log_line(line)
                if not parsed:
                    continue

                matched_api = None
                for api in api_list:
                    if parsed['method'].lower() == api['method'] and \
                            self._match_url_to_entrypoint(parsed['url'], api['path']):
                        matched_api = api
                        break  # first match wins; no need to scan further
                if not matched_api:
                    continue

                # Double quotes inside a single-quoted f-string keep this
                # valid on Python < 3.12 (quote reuse is a 3.12+ feature).
                key = f"{matched_api['path']}__{parsed['method']}"
                data.setdefault(key, {'timing': []})['timing'].append(parsed['timing'])

        result = []
        for key, value in data.items():
            timings = value['timing']
            average = sum(map(float, timings)) / len(timings)
            result.append(key.split('__') + [average] + timings)
        return result


    def process_all_logs(self, target_apis: list[Any]) -> list[list[Any]]:
        """Process every sample.log found under ``network_log_path``."""
        log_files = []
        # Walk the immediate subfolders of the base path; ignore stray files
        # and subfolders that have no sample.log so we don't crash on open().
        for item in self.network_log_path.iterdir():
            if not item.is_dir():
                continue
            sample_file = item / 'sample.log'
            if sample_file.is_file():
                log_files.append(sample_file)
        self.logger.info(f"找到 {len(log_files)} 个日志文件")

        all_data = []
        for log_file in log_files:
            self.logger.info(f"处理 network monitor sample 文件: {str(log_file)}")
            file_data = self._process_log_file(log_file, target_apis)
            all_data.extend(file_data)
            self.logger.info(f"提取了 {len(file_data)} 条记录")

        return all_data


    def save_to_csv(self, data: list[list[Any]], output_path: Path):
        """Save the parsed rows to a CSV file with a fixed header row."""
        if not data:
            self.logger.warning("没有数据需要保存")
            return

        with open(output_path, 'w', newline='', encoding='utf-8') as csvfile:
            # 'timing_avg' fixes the original 'timeing_avg' header typo.
            fieldnames = ['path', 'method', 'timing_avg (ms)', 'timings']
            writer = csv.writer(csvfile)
            writer.writerow(fieldnames)
            writer.writerows(data)
        self.logger.info(f"已保存 {len(data)} 条记录到 {output_path}")
            
