import json
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import re
from typing import Dict, List, Any
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px

class DatabaseExplorer:
    """Explore and analyze a JSON capture of HTTP requests.

    The database file is expected to be a JSON object with a
    'total_tables' count and a 'requests' list; each request carries
    'request_id', 'status_code', 'size', 'timestamp', 'headers' (dict
    with 'path', 'method', 'Content-Type', 'Cache-Control') and 'body'.
    """

    def __init__(self, database_file: str):
        """Load the database at *database_file* and prepare for analysis."""
        self.database_file = database_file
        self.database = self.load_database()
        # Per-request DataFrame, built lazily by extract_request_data().
        self.df = None

    def load_database(self) -> Dict:
        """Read and parse the JSON database file."""
        with open(self.database_file, 'r', encoding='utf-8') as f:
            return json.load(f)

    def get_database_info(self) -> Dict:
        """Return a summary dict: table/request counts, status-code
        histogram, raw response sizes and the observed timestamp range."""
        requests = self.database['requests']
        info = {
            # .get() so a capture missing the key reports 0 instead of raising.
            'total_tables': self.database.get('total_tables', 0),
            'total_requests': len(requests),
            'status_codes': {},
            'response_sizes': [],
            'timestamp_range': {}
        }

        for request in requests:
            # Histogram of status codes.
            status = request['status_code']
            info['status_codes'][status] = info['status_codes'].get(status, 0) + 1
            # Raw byte sizes for later statistics.
            info['response_sizes'].append(request['size'])

        # Timestamps compared as strings — assumes ISO-8601-like values
        # (true elsewhere in this file: analyze_timeline parses them with
        # pd.to_datetime). Empty capture leaves the range as {}.
        timestamps = [req['timestamp'] for req in requests]
        if timestamps:
            info['timestamp_range'] = {
                'start': min(timestamps),
                'end': max(timestamps)
            }

        return info

    def extract_request_data(self) -> pd.DataFrame:
        """Flatten every request into one DataFrame row; cache on self.df."""
        data = []

        for request in self.database['requests']:
            # Request line pieces come from the captured headers.
            url = request['headers'].get('path', '')
            method = request['headers'].get('method', 'GET')

            row = {
                'request_id': request['request_id'],
                'status_code': request['status_code'],
                'size': request['size'],
                'timestamp': request['timestamp'],
                'method': method,
                'url': url,
                'domain': self.extract_domain(url),
                'path': self.extract_path(url),
                'query_params': self.extract_query_params(url),
                'content_type': self.extract_content_type(request['headers']),
                'cache_control': self.extract_cache_control(request['headers']),
                'is_json': self.is_json_response(request['body']),
                'response_size_kb': round(request['size'] / 1024, 2)
            }

            # Classify JSON bodies by their top-level shape.
            if row['is_json']:
                try:
                    json_data = json.loads(request['body'])
                    row['json_type'] = self.identify_json_type(json_data)
                except (ValueError, TypeError):
                    # Narrowed from a bare except: json.loads raises
                    # JSONDecodeError (a ValueError) or TypeError only.
                    row['json_type'] = 'unknown'
            else:
                row['json_type'] = 'not_json'

            data.append(row)

        self.df = pd.DataFrame(data)
        return self.df

    def extract_domain(self, url: str) -> str:
        """Extract the domain from a URL; paths ('/..') map to 'internal'."""
        if url.startswith('/'):
            return 'internal'
        parts = url.split('/')
        return parts[0] if parts else 'unknown'

    def extract_path(self, url: str) -> str:
        """Extract the path component of a URL (always '/'-prefixed)."""
        if url.startswith('/'):
            return url
        parts = url.split('/', 1)
        return '/' + parts[1] if len(parts) > 1 else '/'

    def extract_query_params(self, url: str) -> str:
        """Return the raw query string of *url*, or '' when absent."""
        if '?' in url:
            return url.split('?', 1)[1]
        return ''

    def extract_content_type(self, headers: Dict) -> str:
        """Return the Content-Type header without parameters (no charset)."""
        content_type = headers.get('Content-Type', '')
        if ';' in content_type:
            return content_type.split(';')[0]
        return content_type

    def extract_cache_control(self, headers: Dict) -> str:
        """Return the Cache-Control header, or '' when absent."""
        return headers.get('Cache-Control', '')

    def is_json_response(self, body: str) -> bool:
        """True when *body* parses as JSON."""
        try:
            json.loads(body)
            return True
        except (ValueError, TypeError):
            # Narrowed from a bare except; non-str bodies raise TypeError.
            return False

    def identify_json_type(self, json_data: Any) -> str:
        """Heuristically classify parsed JSON by its top-level keys."""
        if isinstance(json_data, dict):
            if 'errno' in json_data:
                return 'api_response'
            elif 'hits' in json_data:
                return 'search_results'
            elif 'total_tables' in json_data:
                return 'database_info'
            elif 'id' in json_data and 'title' in json_data:
                return 'content_item'
        elif isinstance(json_data, list):
            return 'array_data'
        return 'unknown'

    def analyze_status_codes(self) -> pd.DataFrame:
        """Return status-code counts and percentages as a DataFrame."""
        if self.df is None:
            self.extract_request_data()

        status_analysis = self.df['status_code'].value_counts().reset_index()
        status_analysis.columns = ['status_code', 'count']
        status_analysis['percentage'] = (status_analysis['count'] / len(self.df) * 100).round(2)

        return status_analysis

    def analyze_response_sizes(self) -> Dict:
        """Return descriptive statistics of response sizes in KB."""
        if self.df is None:
            self.extract_request_data()

        sizes = self.df['response_size_kb']
        return {
            'mean': sizes.mean(),
            'median': sizes.median(),
            'min': sizes.min(),
            'max': sizes.max(),
            'std': sizes.std(),
            'q25': sizes.quantile(0.25),
            'q75': sizes.quantile(0.75)
        }

    def analyze_domains(self) -> pd.DataFrame:
        """Return domain counts and percentages as a DataFrame."""
        if self.df is None:
            self.extract_request_data()

        domain_analysis = self.df['domain'].value_counts().reset_index()
        domain_analysis.columns = ['domain', 'count']
        domain_analysis['percentage'] = (domain_analysis['count'] / len(self.df) * 100).round(2)

        return domain_analysis

    def analyze_content_types(self) -> pd.DataFrame:
        """Return content-type counts and percentages as a DataFrame."""
        if self.df is None:
            self.extract_request_data()

        content_analysis = self.df['content_type'].value_counts().reset_index()
        content_analysis.columns = ['content_type', 'count']
        content_analysis['percentage'] = (content_analysis['count'] / len(self.df) * 100).round(2)

        return content_analysis

    def analyze_timeline(self) -> pd.DataFrame:
        """Return per-hour request counts and mean response size."""
        if self.df is None:
            self.extract_request_data()

        # Work on a copy so the cached DataFrame keeps string timestamps.
        df_time = self.df.copy()
        df_time['timestamp'] = pd.to_datetime(df_time['timestamp'])
        df_time['hour'] = df_time['timestamp'].dt.hour

        hourly_stats = df_time.groupby('hour').agg({
            'request_id': 'count',
            'response_size_kb': 'mean'
        }).reset_index()
        hourly_stats.columns = ['hour', 'request_count', 'avg_response_size']

        return hourly_stats

    def find_large_responses(self, threshold_kb: float = 100) -> pd.DataFrame:
        """Return responses larger than *threshold_kb*, biggest first."""
        if self.df is None:
            self.extract_request_data()

        large_responses = self.df[self.df['response_size_kb'] > threshold_kb]
        return large_responses.sort_values('response_size_kb', ascending=False)

    def find_error_responses(self) -> pd.DataFrame:
        """Return responses with status >= 400, most recent first."""
        if self.df is None:
            self.extract_request_data()

        error_responses = self.df[self.df['status_code'] >= 400]
        return error_responses.sort_values('timestamp', ascending=False)

    def create_visualizations(self):
        """Render a 2x2 summary figure and save it as database_analysis.png."""
        if self.df is None:
            self.extract_request_data()

        plt.style.use('seaborn-v0_8')
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))
        fig.suptitle('数据库分析可视化', fontsize=16)

        # 1. Status-code pie chart.
        status_counts = self.df['status_code'].value_counts()
        axes[0, 0].pie(status_counts.values, labels=status_counts.index, autopct='%1.1f%%')
        axes[0, 0].set_title('状态码分布')

        # 2. Response-size histogram.
        axes[0, 1].hist(self.df['response_size_kb'], bins=30, alpha=0.7)
        axes[0, 1].set_title('响应大小分布 (KB)')
        axes[0, 1].set_xlabel('大小 (KB)')
        axes[0, 1].set_ylabel('频次')

        # 3. Top-10 domains, horizontal bars.
        domain_counts = self.df['domain'].value_counts().head(10)
        axes[1, 0].barh(range(len(domain_counts)), domain_counts.values)
        axes[1, 0].set_yticks(range(len(domain_counts)), domain_counts.index)
        axes[1, 0].set_title('域名分布 (Top 10)')
        axes[1, 0].set_xlabel('请求数')

        # 4. Requests per hour.
        timeline_data = self.analyze_timeline()
        axes[1, 1].plot(timeline_data['hour'], timeline_data['request_count'])
        axes[1, 1].set_title('每小时请求数')
        axes[1, 1].set_xlabel('小时')
        axes[1, 1].set_ylabel('请求数')

        plt.tight_layout()
        plt.savefig('database_analysis.png', dpi=300, bbox_inches='tight')
        plt.show()

    def generate_report(self) -> str:
        """Build and return a markdown analysis report."""
        info = self.get_database_info()
        status_analysis = self.analyze_status_codes()
        size_analysis = self.analyze_response_sizes()
        domain_analysis = self.analyze_domains()
        content_analysis = self.analyze_content_types()

        # Empty captures have no timestamp range; avoid a KeyError then.
        ts_start = info['timestamp_range'].get('start', 'N/A')
        ts_end = info['timestamp_range'].get('end', 'N/A')

        report = f"""
# 数据库分析报告

## 基本信息
- 总表数: {info['total_tables']}
- 总请求数: {info['total_requests']}
- 时间范围: {ts_start} 到 {ts_end}

## 状态码分析
{status_analysis.to_string(index=False)}

## 响应大小分析
- 平均大小: {size_analysis['mean']:.2f} KB
- 中位数: {size_analysis['median']:.2f} KB
- 最大值: {size_analysis['max']:.2f} KB
- 最小值: {size_analysis['min']:.2f} KB

## 域名分布 (Top 10)
{domain_analysis.head(10).to_string(index=False)}

## 内容类型分布
{content_analysis.to_string(index=False)}

## 异常请求
- 大响应 (>100KB): {len(self.find_large_responses())} 个
- 错误响应 (>=400): {len(self.find_error_responses())} 个
"""

        return report

    def search_requests(self, **filters) -> pd.DataFrame:
        """Filter requests by column=value pairs; list values use isin()."""
        if self.df is None:
            self.extract_request_data()

        filtered_df = self.df.copy()

        for key, value in filters.items():
            # Unknown column names are silently ignored.
            if key in filtered_df.columns:
                if isinstance(value, list):
                    filtered_df = filtered_df[filtered_df[key].isin(value)]
                else:
                    filtered_df = filtered_df[filtered_df[key] == value]

        return filtered_df

    def export_to_csv(self, filename: str = 'database_export.csv'):
        """Write the request DataFrame to *filename* as UTF-8 CSV."""
        if self.df is None:
            self.extract_request_data()

        self.df.to_csv(filename, index=False, encoding='utf-8')
        # Bug fix: the message previously did not interpolate the filename.
        print(f"数据已导出到 {filename}")

# 主程序
def main():
    # 创建数据库探索器
    explorer = DatabaseExplorer('database.json')
    
    # 生成报告
    print("=== 数据库分析报告 ===")
    print(explorer.generate_report())
    
    # 创建可视化
    print("\n正在生成可视化图表...")
    explorer.create_visualizations()
    
    # 导出数据
    print("\n导出数据到CSV...")
    explorer.export_to_csv()
    
    # 示例查询
    print("\n=== 示例查询 ===")
    
    # 查找大型响应
    large_responses = explorer.find_large_responses(50)
    print(f"超过50KB的响应 ({len(large_responses)} 个):")
    print(large_responses[['request_id', 'url', 'response_size_kb', 'status_code']].head())
    
    # 查找错误响应
    error_responses = explorer.find_error_responses()
    print(f"\n错误响应 ({len(error_responses)} 个):")
    print(error_responses[['request_id', 'url', 'status_code', 'timestamp']].head())
    
    # 搜索特定域名
    api_requests = explorer.search_requests(domain='internal')
    print(f"\n内部API请求 ({len(api_requests)} 个):")
    print(api_requests[['request_id', 'path', 'status_code', 'response_size_kb']].head())

if __name__ == "__main__":
    main()