import re
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import sys
import os
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from sklearn.linear_model import LinearRegression
import csv
import sqlite3

# Parse command-line arguments
parser = argparse.ArgumentParser(description='Analyze log files for problem size and processing time.')
parser.add_argument('log_path', type=str, help='Path to the log file or directory to analyze')
parser.add_argument('--db_path', type=str, default=None, help='Path to the SQLite database file')
parser.add_argument('--track_name', type=str, default=None, help='Track name to filter log files by hash')
args = parser.parse_args()

log_path = args.log_path

# Regular expressions to extract data from the main solver logs
varnum_re = re.compile(r'VarNum:\s*(\d+)')    # variable count of an instance
clanum_re = re.compile(r'ClaNum:\s*(\d+)')    # clause count of an instance
decomp_re = re.compile(r'Decomp:\s*([\d.]+) seconds')
parsing_re = re.compile(r'Parsing:\s*([\d.]+) seconds')
sort_re = re.compile(r'Total sorting time:\s*([\d.]+) seconds')
merge_re = re.compile(r'Merge time:\s*([\d.]+) seconds')
instance_re = re.compile(r'Parsing file:\s*(.+)')   # marks the start of a new instance's log section
error_re = re.compile(r'\[gen-scope\] Error processing (.+?): (.+)')

# --- Scope Image Creation Analysis ---
# Regular expressions for image creation logs
# ([\d.eE+-]+) also accepts scientific notation such as 1.2e-05
scope_image_re = re.compile(r'Scope Image Creation: ([\d.eE+-]+) seconds')
ascii_image_re = re.compile(r'ASCII Image Creation: ([\d.eE+-]+) seconds')
processed_image_re = re.compile(r'\[gen-image\] Processed: (.+)')

# Extract hash from filename (assuming hash is before first dash)
def get_hash_from_filename(filename):
    """Return the hash prefix of *filename*'s basename (text before the first
    '-'), or 'unknown' when *filename* is falsy (None / empty string)."""
    if not filename:
        return 'unknown'
    return os.path.basename(filename).split('-')[0]

# Module-level accumulators filled by the parsing functions below.
image_creation_data = []  # one dict per "Scope Image Creation" timing line

# Data storage
data = []  # one dict per parsed instance (or per error) from the main logs

def get_track_hashes(db_path, track_name):
    """Get hash codes for files in the specified track.

    Args:
        db_path: Path to a SQLite database containing `features` and `track` tables.
        track_name: Pattern matched with SQL LIKE against `track.value`.

    Returns:
        Set of distinct hash strings whose track value matches *track_name*.
    """
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        query = """
        SELECT f.hash 
        FROM features f 
        LEFT JOIN track t ON f.hash = t.hash 
        WHERE t.value LIKE ?
        """
        cursor.execute(query, (track_name,))
        # Set comprehension deduplicates hashes that match multiple track rows.
        return {row[0] for row in cursor.fetchall()}
    finally:
        # Always release the connection — the original leaked it if the query raised.
        conn.close()

def process_log_file(log_file):
    """Parse one main solver log and append one dict per instance (or per
    error) to the module-level `data` list. Parse failures of the whole file
    are reported to stdout instead of raising."""
    base = os.path.basename(log_file)
    # (compiled regex, record key, value caster) for the simple per-line fields
    field_specs = (
        (varnum_re, 'varnum', int),
        (clanum_re, 'clanum', int),
        (decomp_re, 'decomp_time', float),
        (parsing_re, 'parsing_time', float),
        (sort_re, 'sort_time', float),
        (merge_re, 'merge_time', float),
        (ascii_image_re, 'ascii_image_time', float),
    )
    try:
        with open(log_file, 'r') as f:
            record = {'source_file': base}
            for line in f:
                m = instance_re.search(line)
                if m:
                    # A "Parsing file:" line starts a fresh record; flush the
                    # previous one if it collected anything beyond source_file.
                    if len(record) > 1:
                        data.append(record)
                    record = {'source_file': base, 'instance': m.group(1)}
                for regex, key, cast in field_specs:
                    m = regex.search(line)
                    if m:
                        record[key] = cast(m.group(1))
                m = error_re.search(line)
                if m:
                    # An error line ends the current instance immediately.
                    record['error'] = m.group(2)
                    if len(record) > 1:
                        data.append(record)
                    record = {'source_file': base}
            # Flush the trailing record, if it holds more than just source_file.
            if len(record) > 1:
                data.append(record)
    except Exception as e:
        print(f"Error processing {log_file}: {e}")

def process_scope_image_log(log_file):
    """Collect per-image 'Scope Image Creation' timings from one log into the
    module-level `image_creation_data` list. Failures are reported, not raised."""
    base = os.path.basename(log_file)
    try:
        with open(log_file, 'r') as f:
            instance_name = None
            next_index = 0
            for line in f:
                processed = processed_image_re.search(line)
                if processed:
                    # "[gen-image] Processed:" names the instance that owns the
                    # timing lines that follow; restart the per-instance counter.
                    instance_name = processed.group(1)
                    next_index = 0
                timing = scope_image_re.search(line)
                if timing:
                    image_creation_data.append({
                        'instance': instance_name,
                        'source_file': base,
                        'image_index': next_index,
                        'scope_image_creation_time': float(timing.group(1)),
                    })
                    next_index += 1
    except Exception as e:
        print(f"Error processing scope image log {log_file}: {e}")

# Optional hash whitelist: when both --db_path and --track_name are given,
# only log files whose hash prefix (text before the first dash) belongs to
# the requested track are analyzed below.
hash_filter = None
if args.db_path and args.track_name:
    hash_filter = get_track_hashes(args.db_path, args.track_name)

def file_hash_in_filter(fname, hash_filter):
    """Return whether *fname*'s hash prefix (the text before the first dash,
    or the whole name when there is no dash) is a member of *hash_filter*."""
    return fname.partition("-")[0] in hash_filter

def _is_candidate_log(fname):
    """True for directory entries worth parsing: .log files or extension-less
    files (NOT .txt, despite the original comment) that pass the optional
    track-hash filter (hash = text before the first dash)."""
    if not (fname.endswith('.log') or '.' not in fname):
        return False
    return not hash_filter or file_hash_in_filter(fname, hash_filter)

# First pass: extract per-instance timings from the main logs.
if os.path.isfile(log_path):
    if not hash_filter or file_hash_in_filter(os.path.basename(log_path), hash_filter):
        process_log_file(log_path)
elif os.path.isdir(log_path):
    for fname in os.listdir(log_path):
        if _is_candidate_log(fname):
            process_log_file(os.path.join(log_path, fname))
else:
    print(f"Error: '{log_path}' is not a valid file or directory.")

# Second pass over the same selection: extract per-image creation timings.
if os.path.isdir(log_path):
    for fname in os.listdir(log_path):
        if _is_candidate_log(fname):
            print(f"Processing scope image log: {os.path.join(log_path, fname)}")
            process_scope_image_log(os.path.join(log_path, fname))
elif os.path.isfile(log_path):
    if not hash_filter or file_hash_in_filter(os.path.basename(log_path), hash_filter):
        process_scope_image_log(log_path)

# Convert to DataFrame
# Each parsed instance becomes one row; columns are the union of keys seen
# across all records, so measurements absent from a record appear as NaN.
df = pd.DataFrame(data)

# Save the extracted table to CSV for further analysis
df.to_csv('instance_times_table.csv', index=False)
print('Extracted table saved to instance_times_table.csv')

if not df.empty:

    # Print max, min, mean, and median for times, with file and instance info for max/min
    for col in ['decomp_time', 'parsing_time', 'sort_time', 'ascii_image_time']:
        if col in df.columns:
            print(f'\n--- {col} statistics ---')
            # Restrict to rows where this timing was actually captured.
            valid = df[df[col].notnull()]
            if not valid.empty:
                max_idx = valid[col].idxmax()
                min_idx = valid[col].idxmin()
                
                if col == 'ascii_image_time':
                    # Special handling for ASCII image time - only show file hash
                    max_hash = get_hash_from_filename(valid.loc[max_idx, 'source_file'] if 'source_file' in valid.columns else None)
                    min_hash = get_hash_from_filename(valid.loc[min_idx, 'source_file'] if 'source_file' in valid.columns else None)
                    print(f'Max {col}: {valid.loc[max_idx, col]:.6f} (hash: {max_hash})')
                    print(f'Min {col}: {valid.loc[min_idx, col]:.6f} (hash: {min_hash})')
                else:
                    # Print max with available information
                    max_info = []
                    for field in ['source_file', 'instance', 'varnum', 'clanum']:
                        if field in valid.columns:
                            max_info.append(f"{field}: {valid.loc[max_idx, field]}")
                    print(f'Max {col}: {valid.loc[max_idx, col]:.6f} ({", ".join(max_info)})')
                    
                    # Print min with available information
                    min_info = []
                    for field in ['source_file', 'instance', 'varnum', 'clanum']:
                        if field in valid.columns:
                            min_info.append(f"{field}: {valid.loc[min_idx, field]}")
                    print(f'Min {col}: {valid.loc[min_idx, col]:.6f} ({", ".join(min_info)})')
                
                print(f'Mean {col}: {valid[col].mean():.6f}')
                print(f'Median {col}: {valid[col].median():.6f}')

                # Percentile statistics
                percentiles = [0, 25, 50, 75, 100]
                perc_values = np.percentile(valid[col], percentiles)
                print(f"Percentiles for {col}:")
                for p, v in zip(percentiles, perc_values):
                    # Find the row(s) closest to this percentile value
                    closest_idx = (valid[col] - v).abs().idxmin()
                    row = valid.loc[closest_idx]
                    
                    if col == 'ascii_image_time':
                        # For ASCII image time, only show file hash
                        file_hash = get_hash_from_filename(row['source_file'] if 'source_file' in row else None)
                        print(f"  {p}th percentile: {v:.6f} (hash: {file_hash})")
                    else:
                        # Gather available information for this percentile
                        perc_info = []
                        for field in ['source_file', 'instance', 'varnum', 'clanum']:
                            if field in valid.columns:
                                perc_info.append(f"{field}: {row[field]}")
                        
                        # Calculate total time if components are available
                        # NOTE(review): `'x' in row` tests the Series INDEX, so this is
                        # True whenever the columns exist even if the values are NaN,
                        # and `NaN or 0` stays NaN — total_time may print as nan here.
                        # Confirm whether that is intended.
                        if 'parsing_time' in row and 'sort_time' in row:
                            total_time = (row.get('parsing_time', 0) or 0) + (row.get('sort_time', 0) or 0)
                            perc_info.append(f"total_time: {total_time:.6f}")
                        
                        print(f"  {p}th percentile: {v:.6f} ({', '.join(perc_info)})")
            else:
                print(f'No valid data for {col}')

    # Calculate total processing time (sum of relevant times)
    # NOTE(review): NaN components are skipped by DataFrame.sum (treated as 0),
    # but this raises KeyError if any of the three columns never appeared in
    # the logs at all — confirm all log formats emit them.
    df['total_time'] = df[['decomp_time', 'parsing_time', 'sort_time']].sum(axis=1)

    # Plotting
    # plt.figure(figsize=(16, 10))
    # plt.scatter(df['varnum'], df['total_time'], label='Total Time vs VarNum')
    # plt.scatter(df['clanum'], df['total_time'], label='Total Time vs ClaNum', marker='x')
    # plt.xlabel('Problem Size (Variables/Clauses)')
    # plt.ylabel('Total Processing Time (s)')
    # plt.legend()
    # plt.title('Processing Time vs Problem Size')
    # plt.show()

    # 3D Scatter Plot
    # Points are additionally colored by total_time so slow instances stand out.
    fig = plt.figure(figsize=(16, 10))
    ax = fig.add_subplot(111, projection='3d')
    sc = ax.scatter(df['varnum'], df['clanum'], df['total_time'], c=df['total_time'], cmap='viridis', s=50)
    ax.set_xlabel('Variable Number')
    ax.set_ylabel('Clause Number')
    ax.set_zlabel('Total Processing Time (s)')
    ax.set_title('3D Correlation: VarNum, ClaNum, Processing Time')
    fig.colorbar(sc, ax=ax, label='Total Processing Time (s)')
    fig.savefig('3d_scatter.png')
    print('3D scatter plot saved as 3d_scatter.png')
    # plt.show()

    # 2D Heatmap (Hexbin)
    # Each hexagonal bin shows the MEAN total_time of the instances inside it.
    plt.figure(figsize=(16, 10))
    hb = plt.hexbin(df['varnum'], df['clanum'], C=df['total_time'], gridsize=30, cmap='inferno', reduce_C_function=np.mean)
    plt.colorbar(hb, label='Mean Total Processing Time (s)')
    plt.xlabel('Variable Number')
    plt.ylabel('Clause Number')
    plt.title('Heatmap: Problem Size vs Processing Time')
    plt.savefig('heatmap.png')
    print('Heatmap saved as heatmap.png')
    # plt.show()

    # Optionally, print correlation
    # Pearson correlation of each problem-size dimension with total_time.
    print('Correlation with VarNum:', df['varnum'].corr(df['total_time']))
    print('Correlation with ClaNum:', df['clanum'].corr(df['total_time']))

    # Percentile statistics based on total_time
    # Only use rows where all times are present (not NaN)
    valid = df[df[['decomp_time', 'parsing_time', 'sort_time']].notnull().all(axis=1)]
    if not valid.empty:
        percentiles = [0, 25, 50, 75, 100]
        perc_values = np.percentile(valid['total_time'], percentiles)
        print(f"\nPercentiles for total_time:")
        excel_lines = []
        for p, v in zip(percentiles, perc_values):
            # Report the concrete row whose total_time is closest to each percentile.
            closest_idx = (valid['total_time'] - v).abs().idxmin()
            row = valid.loc[closest_idx]
            print(f"  {p}th percentile: {v:.6f} (file: {row['source_file']}, instance: {row['instance']}, varnum: {row['varnum']}, clanum: {row['clanum']}, decomp_time: {row.get('decomp_time', 'NA')}, parsing_time: {row.get('parsing_time', 'NA')}, sort_time: {row.get('sort_time', 'NA')}, total_time: {row.get('total_time', 'NA')})")
            # Newline-separated triplet so each percentile pastes as one Excel column.
            excel_lines.append(f"{row['varnum']}\n{row['clanum']}\n{row['total_time']}")
        print("\n--- For Excel copy-paste ---")
        print("\n".join(excel_lines))

# Save scope image creation times to CSV if any found
if image_creation_data:
    image_df = pd.DataFrame(image_creation_data)
    image_df.to_csv('scope_image_creation_times.csv', index=False)
    print('Scope image creation times saved to scope_image_creation_times.csv')
    # Print statistics
    print('\n--- Scope Image Creation Time Statistics ---')
    times = image_df['scope_image_creation_time']
    # Summary statistics over every per-image timing collected above.
    for label, value in (('Max', times.max()), ('Min', times.min()),
                         ('Mean', times.mean()), ('Median', times.median())):
        print(f"{label}: {value:.6f} s")
else:
    print('No scope image creation times found in the logs.')
