#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Regression Prediction Model
Input four metrics, output one predicted value
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import warnings
import pickle
import os
from datetime import datetime
# NOTE(review): suppresses ALL warnings process-wide (including sklearn
# convergence warnings) — consider narrowing to specific categories.
warnings.filterwarnings('ignore')

# Set font for plots
plt.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial']
# Use an ASCII hyphen for minus signs (avoids missing-glyph boxes with some fonts)
plt.rcParams['axes.unicode_minus'] = False

class RegressionPredictor:
    """Train several regression models on four input metrics and keep the best.

    Typical workflow:
        load_timing_data() -> prepare_data() -> train_models() -> predict()

    The model with the highest test-set R² is stored as ``best_model`` and
    used by ``predict()``; ``save_model()``/``load_model()`` persist the
    whole state with pickle.
    """

    def __init__(self):
        self.models = {}                # name -> evaluation dict, filled by train_models()
        self.scaler = StandardScaler()  # fitted on the TRAINING split in prepare_data()
        self.best_model = None          # best estimator found so far (None until trained)
        self.best_score = -np.inf       # test-set R² of best_model
        self.best_model_name = None
        self.feature_names = ['metric1', 'metric2', 'metric3', 'metric4']

    def load_timing_data(self, csv_path="l40/extracted_timing_data.csv"):
        """
        Load data from extracted_timing_data.csv.

        Expected CSV columns (names must match exactly):
            verify_cost        -> target value
            total_seq_length   -> metric1
            total_draft_tokens -> metric2
        metric3 and metric4 are filled with the fixed values 846 and 362.1.

        Returns:
            DataFrame with columns metric1..metric4 and 'target', or None
            when the file is missing or cannot be parsed.
        """
        try:
            df = pd.read_csv(csv_path)
            print(f"Successfully loaded file: {csv_path}")
            print(f"Data shape: {df.shape}")

            # Fail fast with a clear message when the schema is wrong.
            required_columns = ['verify_cost', 'total_seq_length', 'total_draft_tokens']
            missing_columns = [col for col in required_columns if col not in df.columns]
            if missing_columns:
                raise ValueError(f"Missing required columns: {missing_columns}")

            # Reshape into the fixed 4-metric layout the models expect.
            data = pd.DataFrame({
                'metric1': df['total_seq_length'],
                'metric2': df['total_draft_tokens'],
                'metric3': 846,    # Fixed value for all rows
                'metric4': 362.1,  # Fixed value for all rows
                'target': df['verify_cost']
            })

            print(f"Data preview:\n{data.head()}")
            print(f"Data statistics:\n{data.describe()}")
            return data

        except FileNotFoundError:
            print(f"Error: File not found '{csv_path}'")
            print("Please run extract_timing_data.py first to generate the data file")
            return None
        except Exception as e:
            print(f"Error reading data: {e}")
            return None

    def prepare_data(self, data, test_size=0.2, random_state=42):
        """
        Split into train/test sets and standardize the features.

        BUG FIX: the scaler is now fitted on the training split only and
        applied to the test split afterwards. Previously it was fitted on
        the full dataset before splitting, leaking test-set statistics into
        the standardization and optimistically biasing the reported scores.

        Args:
            data: DataFrame produced by load_timing_data().
            test_size: fraction of rows held out for testing (default 0.2).
            random_state: seed for the train/test split (default 42).

        Returns:
            (X_train, X_test, y_train, y_test, X_scaled) — the first four in
            train-fitted scaled space; X_scaled is the full feature matrix
            transformed with the same scaler.
        """
        X = data[self.feature_names]
        y = data['target']

        # Split BEFORE fitting the scaler so no test statistics leak in.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state
        )

        X_train = self.scaler.fit_transform(X_train)
        X_test = self.scaler.transform(X_test)
        X_scaled = self.scaler.transform(X)

        return X_train, X_test, y_train, y_test, X_scaled

    def train_models(self, X_train, y_train, X_test, y_test):
        """
        Train a fixed suite of regression models and evaluate each on the
        test split.

        Side effects: populates self.models with per-model results and
        updates best_model / best_model_name / best_score to the model with
        the highest test-set R².

        Returns:
            dict: name -> {'model', 'mse', 'rmse', 'mae', 'r2', 'predictions'}
        """
        models = {
            'Linear Regression': LinearRegression(),
            'Ridge Regression': Ridge(alpha=1.0),
            'Lasso Regression': Lasso(alpha=0.1),
            'Random Forest': RandomForestRegressor(n_estimators=100, random_state=42),
            'Gradient Boosting': GradientBoostingRegressor(n_estimators=100, random_state=42)
            # 'Support Vector Regression': SVR(kernel='rbf', C=1.0, gamma='scale')
        }

        results = {}

        for name, model in models.items():
            print(f"Training {name}...")
            model.fit(X_train, y_train)

            y_pred = model.predict(X_test)

            # Test-set evaluation metrics.
            mse = mean_squared_error(y_test, y_pred)
            rmse = np.sqrt(mse)
            mae = mean_absolute_error(y_test, y_pred)
            r2 = r2_score(y_test, y_pred)

            results[name] = {
                'model': model,
                'mse': mse,
                'rmse': rmse,
                'mae': mae,
                'r2': r2,
                'predictions': y_pred
            }

            print(f"  R² Score: {r2:.4f}")
            print(f"  RMSE: {rmse:.4f}")
            print(f"  MAE: {mae:.4f}")
            print()

            # Keep the model with the highest test-set R².
            if r2 > self.best_score:
                self.best_score = r2
                self.best_model = model
                self.best_model_name = name

        self.models = results
        return results

    def predict(self, input_data):
        """
        Predict the target value for one sample using the best model.

        Args:
            input_data: dict keyed by the feature names in
                self.feature_names, or a list/tuple of the four metric
                values in that order.

        Returns:
            float: the predicted value.

        Raises:
            ValueError: when no model is trained, when a dict is missing
                features, or when the input has the wrong length/type.
        """
        if self.best_model is None:
            raise ValueError("Model not trained yet. Please call train_models method first.")

        # Normalize the input into a (1, n_features) array, validating as we go.
        if isinstance(input_data, dict):
            missing = [name for name in self.feature_names if name not in input_data]
            if missing:
                raise ValueError(f"Missing required features: {missing}")
            features = np.array([[input_data[name] for name in self.feature_names]])
        elif isinstance(input_data, (list, tuple)):
            if len(input_data) != len(self.feature_names):
                raise ValueError(
                    f"Expected {len(self.feature_names)} values, got {len(input_data)}"
                )
            features = np.array([list(input_data)])
        else:
            raise ValueError("Input data must be dictionary or list format")

        # Apply the same standardization the model was trained with.
        features_scaled = self.scaler.transform(features)

        prediction = self.best_model.predict(features_scaled)[0]
        return prediction

    def save_model(self, filepath=None):
        """
        Pickle the trained model, scaler, and metadata to disk.

        Args:
            filepath: destination path; defaults to a timestamped
                'regression_model_<ts>.pkl' in the working directory.

        Returns:
            str: the path the model was written to.

        Raises:
            ValueError: when no model has been trained yet.
        """
        if self.best_model is None:
            raise ValueError("No trained model to save")

        if filepath is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filepath = f"regression_model_{timestamp}.pkl"

        # The scaler must be saved with the model: predictions are only
        # valid on inputs transformed by the same scaler.
        model_data = {
            'best_model': self.best_model,
            'best_model_name': self.best_model_name,
            'best_score': self.best_score,
            'scaler': self.scaler,
            'feature_names': self.feature_names,
            'all_models': self.models,
            'timestamp': datetime.now().isoformat()
        }

        with open(filepath, 'wb') as f:
            pickle.dump(model_data, f)

        print(f"Model saved to: {filepath}")
        return filepath

    def load_model(self, filepath):
        """
        Restore a model previously written by save_model().

        SECURITY NOTE: pickle.load executes arbitrary code from the file —
        only load model files from trusted sources.

        Args:
            filepath: path to a .pkl file created by save_model().

        Returns:
            dict: the raw deserialized model payload.

        Raises:
            FileNotFoundError: when filepath does not exist.
        """
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"Model file not found: {filepath}")

        with open(filepath, 'rb') as f:
            model_data = pickle.load(f)

        self.best_model = model_data['best_model']
        self.best_model_name = model_data['best_model_name']
        self.best_score = model_data['best_score']
        self.scaler = model_data['scaler']
        self.feature_names = model_data['feature_names']
        # Older files may lack 'all_models'; fall back to an empty dict.
        self.models = model_data.get('all_models', {})

        print(f"Model loaded from: {filepath}")
        print(f"Best model: {self.best_model_name}")
        print(f"Best R² score: {self.best_score:.4f}")

        return model_data

    def get_model_info(self):
        """Return a summary dict for the trained model, or a message string
        when nothing has been trained yet."""
        if self.best_model is None:
            return "No model trained yet"

        info = {
            'best_model_name': self.best_model_name,
            'best_score': self.best_score,
            'feature_names': self.feature_names,
            'model_type': type(self.best_model).__name__
        }
        return info

    def plot_feature_importance(self):
        """Plot and save per-feature importance for the best model.

        Uses feature_importances_ for tree ensembles and |coef_| for linear
        models; prints a message and returns for models with neither.
        Writes 'feature_importance.png' to the working directory.
        """
        if self.best_model is None:
            print("No trained model")
            return

        if hasattr(self.best_model, 'feature_importances_'):
            # Tree ensembles: Random Forest, Gradient Boosting, etc.
            importances = self.best_model.feature_importances_
        elif hasattr(self.best_model, 'coef_'):
            # Linear models: magnitude of the standardized coefficients.
            importances = np.abs(self.best_model.coef_)
        else:
            print("This model does not support feature importance analysis")
            return

        plt.figure(figsize=(10, 6))
        bars = plt.bar(self.feature_names, importances)
        plt.title('Feature Importance Analysis')
        plt.xlabel('Features')
        plt.ylabel('Importance')

        # Annotate each bar with its numeric value.
        for bar, importance in zip(bars, importances):
            plt.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.01,
                    f'{importance:.3f}', ha='center', va='bottom')

        plt.tight_layout()
        plt.savefig('feature_importance.png', dpi=300, bbox_inches='tight')
        plt.show()

def main():
    """End-to-end driver: load the timing data, train and evaluate all
    models, persist the best one, and plot feature importance.

    Exits early (after printing a message) when the data file is missing.
    """
    print("=== Regression Prediction Model ===")

    predictor = RegressionPredictor()

    # Load timing data; load_timing_data() returns None on any read error.
    print("Loading timing data...")
    data = predictor.load_timing_data()
    if data is None:
        print("Failed to load data, exiting program")
        return

    print(f"Data shape: {data.shape}")
    print()

    # Split and standardize.
    print("Preparing training data...")
    X_train, X_test, y_train, y_test, X_scaled = predictor.prepare_data(data)
    print(f"Training set size: {X_train.shape}")
    print(f"Test set size: {X_test.shape}")
    print()

    # Train the model suite; results holds per-model evaluation metrics.
    print("Starting model training...")
    results = predictor.train_models(X_train, y_train, X_test, y_test)

    # Persist the best model (with its scaler) for later reuse.
    print("Saving model...")
    predictor.save_model()

    print("Plotting feature importance...")
    predictor.plot_feature_importance()

    # Summarize the winning model's test-set metrics.
    print("=== Model Summary ===")
    print("Best model performance:")
    if predictor.best_model_name:
        best_result = results[predictor.best_model_name]
        print(f"Model: {predictor.best_model_name}")
        print(f"R² Score: {best_result['r2']:.4f}")
        print(f"RMSE: {best_result['rmse']:.4f}")
        print(f"MAE: {best_result['mae']:.4f}")

# Script entry point: run the full train/evaluate/save pipeline.
if __name__ == "__main__":
    main()

