import re
import os
from flask import Flask, render_template, request, flash, redirect, url_for
from werkzeug.utils import secure_filename

# --- Flask App Configuration ---
app = Flask(__name__)
# A secret key is required for session-backed flash() messages.
# NOTE(review): hard-coded secret — should be loaded from an env var in production.
app.config['SECRET_KEY'] = 'a_secure_random_secret_key'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16 MB max upload size

# --- AWR Analysis Logic ---

def analyze_awr_content(report_text):
    """
    Orchestrate parsing and analysis of raw AWR report text.

    The report (HTML or plain text) is flattened to plain text, the
    individual sections are extracted, and each one is run through its
    analyzer to attach tuning suggestions.

    Returns a dict with keys: summary, load_profile, top_waits,
    instance_efficiency, sql_by_elapsed.
    """
    # Strip HTML tags so the regex-based section parsers see plain text.
    plain_text = re.sub('<[^>]+>', ' ', report_text)

    raw_load_profile = parse_table(
        plain_text, r'Load Profile', r'Per Second\s*Per Transaction', 3)
    raw_top_waits = parse_table(
        plain_text,
        r'Top 10 Foreground Events by Total Wait Time|Top Timed Foreground Events',
        r'Event\s*Waits\s*Total Wait Time \(s\)', 7, limit=10)
    raw_efficiency = parse_key_value_table(
        plain_text, r'Instance Efficiency Percentages', r'Buffer Nowait %')
    raw_sql = parse_table(
        plain_text, r'SQL ordered by Elapsed Time',
        r'Elapsed Time \(s\)\s*CPU Time \(s\)', 8, limit=10)

    return {
        'summary': parse_summary(plain_text),
        'load_profile': analyze_load_profile(raw_load_profile),
        'top_waits': analyze_top_waits(raw_top_waits),
        'instance_efficiency': analyze_instance_efficiency(raw_efficiency),
        'sql_by_elapsed': analyze_sql(raw_sql),
    }

# --- Parsing Utilities ---

def get_value(text, regex, group=1):
    """Search text with regex (DOTALL) and return the stripped capture group, or 'N/A' when there is no match."""
    hit = re.search(regex, text, re.DOTALL)
    if hit is None:
        return 'N/A'
    return hit.group(group).strip()

def parse_summary(text):
    """Parse the header/summary section of the AWR report.

    Fix: the original 'elapsed'/'db_time' patterns matched literal
    '</td><td' HTML markup, but analyze_awr_content strips all HTML tags
    before calling this function, so both fields were always 'N/A'.
    Tag-stripped patterns are tried first; the old HTML patterns are kept
    as a fallback in case raw HTML text is ever passed in directly.

    Returns a dict with db_name, instance_name, elapsed and db_time
    (each 'N/A' when not found).
    """
    elapsed = get_value(text, r'Elapsed:\s*([\d\.,]+)\s*\(mins\)')
    if elapsed == 'N/A':
        elapsed = get_value(text, r'Elapsed:</td><td.*?</td><td.*?>([\d\.]+) \(mins\)</td>')
    db_time = get_value(text, r'DB Time:\s*([\d\.,]+)\s*\(mins\)')
    if db_time == 'N/A':
        db_time = get_value(text, r'DB Time:</td><td.*?</td><td.*?>([\d\.]+) \(mins\)</td>')
    return {
        # First word on the row below the 'DB Name  DB Id ...' header line.
        'db_name': get_value(text, r'DB Name\s*DB Id[\s\S]*?\n\s*(\w+)'),
        'instance_name': get_value(text, r'DB Name\s*DB Id[\s\S]*?\n\s*\w+\s*\d+\s*(\w+)'),
        'elapsed': elapsed,
        'db_time': db_time,
    }

def parse_table(text, section_pattern, header_pattern, num_columns, limit=15):
    """
    Generic parser for AWR sections laid out as whitespace-aligned tables.

    Locates the section via section_pattern, then its column header via
    header_pattern, and reads rows split on runs of 2+ spaces until a
    blank/separator line ends the table or `limit` rows are collected.
    Returns a list of row lists; [] when the section/header is absent or
    parsing fails.
    """
    try:
        sec = re.search(section_pattern, text, re.IGNORECASE)
        if sec is None:
            return []

        body = text[sec.start():]
        hdr = re.search(header_pattern, body, re.IGNORECASE)
        if hdr is None:
            return []

        rows = []
        for raw in body[hdr.end():].split('\n'):
            stripped = raw.strip()
            # Blank lines, dashed rules and footer text terminate the table —
            # but only once at least one data row has been collected.
            is_terminator = (
                not stripped
                or stripped.startswith(('-', 'End '))
                or stripped.lower().startswith('back to top')
            )
            if is_terminator:
                if rows:
                    break
                continue

            # Columns in AWR text reports are separated by 2+ spaces.
            cells = re.split(r'\s{2,}', stripped)
            if len(cells) >= num_columns - 1:  # tolerate one missing column
                if len(cells) >= num_columns:
                    # Last column (e.g. SQL text) may itself contain runs of
                    # spaces — glue the overflow back together.
                    tail = ' '.join(cells[num_columns - 1:])
                    cells = cells[:num_columns - 1] + [tail]
                rows.append(cells)

            if len(rows) >= limit:
                break
        return rows
    except Exception:
        # Best-effort parsing: a malformed section yields no rows.
        return []

def parse_key_value_table(text, section_pattern, header_pattern):
    """
    Parse tables made of 'Key: Value' pairs, like Instance Efficiency.

    Fixes over the previous version:
    - it split the *regex pattern string* (`header_pattern`) on whitespace
      instead of the matched header text, producing garbage keys;
    - it captured only the first 'Key: Value' pair per line, although AWR
      packs several pairs on each line;
    - keys kept internal space runs ('Buffer Hit   %'), so downstream
      lookups by canonical name ('Buffer Hit %') always missed.

    Args:
        text: tag-stripped AWR report text.
        section_pattern: regex locating the section heading.
        header_pattern: regex matching the first metric label of the table.

    Returns:
        dict mapping whitespace-normalized metric names to string values;
        {} when the section/header is absent or parsing fails.
    """
    try:
        section_match = re.search(section_pattern, text, re.IGNORECASE)
        if not section_match:
            return {}

        section_text = text[section_match.start():]
        header_match = re.search(header_pattern, section_text, re.IGNORECASE)
        if not header_match:
            return {}

        # Start at the matched label (not after it) so the first metric's
        # own 'Key: Value' pair is captured like all the others.
        table_text = section_text[header_match.start():]

        data = {}
        for line in table_text.split('\n'):
            line = line.strip()
            # A blank line or dashed rule marks the end of the table.
            if not line or line.startswith('-'):
                break
            # Capture every 'Key:  Value' pair on the line, not just the first.
            for key, value in re.findall(r'([^:]+?)\s*:\s*(\S+)', line):
                # Collapse internal space runs so 'Buffer Hit   %' keys
                # become the canonical 'Buffer Hit %'.
                data[re.sub(r'\s+', ' ', key).strip()] = value
        return data
    except Exception:
        # Best-effort parsing: malformed input yields no data rather than
        # crashing the whole analysis.
        return {}


# --- Analysis Sections (to generate suggestions) ---

def analyze_load_profile(data):
    """Analyze Load Profile rows and flag a high per-second DB Time.

    Fix: AWR metric labels end with a colon ('DB Time(s):') and parse_table
    keeps it, so the previous `profile.get('DB Time(s)')` lookup could
    never match and the suggestion never fired. Keys are now stored with
    the trailing colon stripped.

    Args:
        data: rows from parse_table, each at least
              [metric_label, per_second, per_transaction].

    Returns:
        dict with 'data' (metric -> {'per_second', 'per_transaction'})
        and 'suggestions' (list of {'title', 'text'}).
    """
    result = {'data': {}, 'suggestions': []}
    if not data:
        return result

    profile = {
        row[0].strip().rstrip(':'): {'per_second': row[1], 'per_transaction': row[2]}
        for row in data if len(row) > 2
    }
    result['data'] = profile

    try:
        # Values may carry thousands separators, e.g. '1,234.5'.
        db_time = float(profile.get('DB Time(s)', {}).get('per_second', '0').replace(',', ''))
        if db_time > 0.8:  # more than 80% of one CPU-second per second
            result['suggestions'].append({
                'title': "高 DB Time",
                'text': f"DB Time per second 为 {db_time:.2f}，表示数据库负载较高。这意味着平均每个CPU核心每秒都非常繁忙。需要重点关注消耗DB Time最多的等待事件和SQL。"
            })
    except (ValueError, KeyError):
        pass  # metric missing or non-numeric: best-effort, skip the check

    return result

def analyze_top_waits(data):
    """Analyze Top Wait Events rows and suggest actions for known bottleneck patterns.

    Returns a dict with 'data' (one dict per event row) and 'suggestions';
    at most one suggestion is emitted, for the top-ranked event.
    """
    result = {'data': [], 'suggestions': []}
    if not data:
        return result

    columns = ['event', 'waits', 'total_wait_time_s', 'avg_wait_ms', 'pct_total', 'wait_class']
    result['data'] = [dict(zip(columns, row)) for row in data]

    # Match the #1 event against known bottleneck signatures, in priority order.
    known_patterns = (
        ('db cpu',
         "CPU 成为主要瓶颈",
         "DB CPU 是最主要的等待事件，意味着数据库大部分时间都在消耗CPU。请检查“按CPU时间排序的SQL”，找出消耗CPU最多的查询进行优化。"),
        ('log file sync',
         "日志文件同步等待",
         "'log file sync' 等待事件突出，通常表示 LGWR 写入重做日志的速度较慢。可能的原因包括 I/O 性能差、频繁的 COMMIT 操作。"),
        ('db file sequential read',
         "索引读等待",
         "'db file sequential read' 等待事件突出，通常与索引读取有关。如果等待时间过长，可能意味着索引效率不高或存在大量的单块读。"),
    )
    leader = result['data'][0]['event'].lower()
    for needle, title, advice in known_patterns:
        if needle in leader:
            result['suggestions'].append({'title': title, 'text': advice})
            break
    return result

def analyze_instance_efficiency(data):
    """Analyze Instance Efficiency ratios and flag ones below their recommended thresholds.

    Returns a dict with 'data' (the input mapping, unchanged) and
    'suggestions' for each ratio that falls below its threshold.
    """
    result = {'data': data, 'suggestions': []}
    if not data:
        return result

    # (metric key, minimum acceptable %, suggestion title, suggestion body template)
    checks = (
        ('Buffer Hit %', 95.0,
         "缓冲命中率较低",
         "Buffer Cache Hit Ratio 为 {:.2f}%，低于建议的95%。这意味着数据库需要频繁地从磁盘读取数据，而不是从内存中获取。考虑增加 SGA_TARGET 或 DB_CACHE_SIZE。"),
        ('Library Hit %', 98.0,
         "库缓存命中率较低",
         "Library Hit Ratio 为 {:.2f}%，低于建议的98%。这可能意味着有大量的硬解析，消耗了额外的CPU。原因可能是没有使用绑定变量，或者共享池（Shared Pool）太小。"),
    )
    for key, threshold, title, template in checks:
        try:
            ratio = float(data.get(key, '100.0').replace('%', ''))
        except (ValueError, KeyError):
            continue  # metric missing or non-numeric: skip this check
        if ratio < threshold:
            result['suggestions'].append({'title': title, 'text': template.format(ratio)})

    return result

def analyze_sql(data):
    """Map SQL-by-elapsed-time rows to named fields and attach a generic review suggestion.

    Returns a dict with 'data' (one dict per SQL row) and 'suggestions'
    (a single advisory entry whenever any rows are present).
    """
    result = {'data': [], 'suggestions': []}
    if not data:
        return result

    field_names = ['elapsed_time_s', 'cpu_time_s', 'executions', 'elapsed_per_exec_s', 'pct_total', 'sql_id', 'sql_text']
    labeled_rows = []
    for entry in data:
        labeled_rows.append(dict(zip(field_names, entry)))
    result['data'] = labeled_rows

    if labeled_rows:
        result['suggestions'].append({
            'title': "关注高消耗SQL",
            'text': "上表列出了按总耗时排序的SQL语句。这些是系统性能的主要消耗者。请重点分析排名靠前的SQL，检查它们的执行计划，并考虑优化，例如创建索引、改写查询或使用绑定变量。"
        })
    return result

# --- Flask Routes ---

@app.route('/', methods=['GET'])
def index():
    """Render the main upload page (templates/index.html)."""
    return render_template('index.html')

@app.route('/analyze', methods=['POST'])
def analyze():
    """Handle the AWR upload, run the analysis, and render the results page.

    Expects a multipart POST with the report under the 'awr_file' field.
    On any failure, flashes a message and redirects back to the index.
    """
    # The form field itself may be missing from the request entirely.
    if 'awr_file' not in request.files:
        flash('未选择文件')
        return redirect(url_for('index'))

    file = request.files['awr_file']

    # An empty filename means the form was submitted without choosing a file.
    if file.filename == '':
        flash('未选择文件')
        return redirect(url_for('index'))

    if file:
        try:
            # Decode defensively: AWR exports are not guaranteed clean UTF-8,
            # so undecodable bytes are dropped rather than raising.
            content = file.read().decode('utf-8', errors='ignore')
            analysis_results = analyze_awr_content(content)
            return render_template('results.html', analysis=analysis_results, filename=secure_filename(file.filename))
        except Exception as e:
            # Top-level boundary: surface any analysis error to the user
            # via flash instead of a 500 page.
            flash(f'分析文件时发生错误: {e}')
            return redirect(url_for('index'))

    return redirect(url_for('index'))


if __name__ == '__main__':
    # Development server only; debug=True enables the interactive debugger
    # and must never be exposed publicly. Use waitress (or similar) in production.
    app.run(debug=True)
