import lightgbm as lgb
import numpy as np
from typing import List, Optional, Union
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

class LightGBMPredictor:
    """Thin inference wrapper around a LightGBM booster loaded from disk."""

    def __init__(self, model_path: str):
        """Load a previously saved LightGBM model from ``model_path``."""
        self.model = lgb.Booster(model_file=model_path)

    def predict(self, features: Union[List[List[float]], np.ndarray]) -> np.ndarray:
        """
        Run the booster on a batch of samples.

        Args:
            features: 2D array-like of shape (n_samples, n_features)

        Returns:
            Predictions as numpy array
        """
        # Nested lists are converted once up front; ndarrays pass through as-is.
        matrix = (
            features
            if isinstance(features, np.ndarray)
            else np.array(features, dtype=np.float32)
        )
        return self.model.predict(matrix)
    
class LightGBMTrainer:
    """Train, evaluate and persist a LightGBM regression model."""

    def __init__(self, params: Optional[dict] = None):
        """
        Initialize with default parameters, optionally overridden.

        Args:
            params: Optional dict of LightGBM parameters; entries override
                the built-in defaults key by key.
        """
        self.default_params = {
            'objective': 'regression',  # default, can be changed
            'metric': 'rmse',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9,
            'verbose': -1
        }
        self.params = {**self.default_params, **(params or {})}
        self.model = None  # set by train()

    def train(self, X: pd.DataFrame, y: pd.Series,
              test_size: float = 0.2, random_state: int = 42,
              early_stopping_rounds: int = 50,
              num_boost_round: int = 1000):
        """
        Train a LightGBM model with a held-out validation split.

        Args:
            X: Features DataFrame
            y: Target Series
            test_size: Size of validation set (0-1)
            random_state: Random seed for the train/validation split
            early_stopping_rounds: Stop if the validation metric does not
                improve for this many rounds (0/None disables early stopping)
            num_boost_round: Maximum number of boosting rounds

        Returns:
            The trained lgb.Booster (also stored on ``self.model``)
        """
        # Split data
        X_train, X_val, y_train, y_val = train_test_split(
            X, y, test_size=test_size, random_state=random_state
        )

        # Create Dataset objects
        train_data = lgb.Dataset(X_train, label=y_train)
        val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

        # BUG FIX: early_stopping_rounds used to be accepted but silently
        # ignored (the callbacks were commented out). Wire it up here.
        callbacks = [lgb.log_evaluation(50)]
        if early_stopping_rounds:
            callbacks.append(lgb.early_stopping(early_stopping_rounds))

        # Train model
        self.model = lgb.train(
            params=self.params,
            train_set=train_data,
            valid_sets=[train_data, val_data],
            num_boost_round=num_boost_round,
            callbacks=callbacks,
        )
        return self.model

    def evaluate_predictions(self, y_true, y_pred):
        """
        Evaluate predictions with multiple metrics including custom accuracy.

        Args:
            y_true: True values (array-like; pandas Series accepted)
            y_pred: Predicted values (array-like)

        Returns:
            Dictionary with 'RMSE', 'MAE', 'R2', 'MAPE', 'Accuracy'
        """
        # BUG FIX: coerce to float numpy arrays first. The original indexed
        # y_true[i] positionally, which is wrong for a pandas Series with a
        # non-default index (label-based lookup -> KeyError/wrong values),
        # and np.zeros_like on an integer array produced an int buffer that
        # truncated the fractional accuracy scores.
        y_true = np.asarray(y_true, dtype=float)
        y_pred = np.asarray(y_pred, dtype=float)

        # Drop pairs where either side is NaN
        mask = ~(np.isnan(y_true) | np.isnan(y_pred))
        y_true = y_true[mask]
        y_pred = y_pred[mask]

        # Calculate standard metrics
        mse = mean_squared_error(y_true, y_pred)
        rmse = np.sqrt(mse)
        mae = mean_absolute_error(y_true, y_pred)
        r2 = r2_score(y_true, y_pred)

        # Calculate MAPE over non-zero targets only (avoid division by zero)
        non_zero_mask = y_true != 0
        if np.any(non_zero_mask):
            mape = np.mean(np.abs(
                (y_true[non_zero_mask] - y_pred[non_zero_mask])
                / y_true[non_zero_mask]
            )) * 100
        else:
            mape = 0

        # Custom accuracy: relative error for large targets (>= 10),
        # absolute error for small ones. Vectorized instead of the original
        # per-element Python loop; np.maximum keeps the divisor >= 10 so the
        # branch that is discarded by np.where never divides by zero.
        absolute_errors = np.abs(y_true - y_pred)
        relative_scores = 1.0 - absolute_errors / np.maximum(y_true, 10.0)
        accuracy_scores = np.where(
            y_true >= 10,
            relative_scores,
            1.0 - absolute_errors,
        )

        # Ensure accuracy scores are within [0, 1]
        accuracy_scores = np.clip(accuracy_scores, 0.0, 1.0)
        accuracy = np.mean(accuracy_scores)

        return {
            'RMSE': rmse,
            'MAE': mae,
            'R2': r2,
            'MAPE': mape,
            'Accuracy': accuracy
        }

    def save_model(self, filepath: str):
        """Save the trained model to ``filepath``.

        Raises:
            ValueError: if train() has not been called yet.
        """
        # Explicit None check: relying on Booster truthiness was fragile.
        if self.model is not None:
            self.model.save_model(filepath)
        else:
            raise ValueError("Model not trained yet")
# ---------------------------------------------------------------------------
# Example usage (disabled). The original kept this script in a bare,
# unassigned triple-quoted string -- a no-op expression that linters flag and
# readers mistake for a docstring; plain comments make the intent explicit.
# Running it requires a local '1.csv' with feature columns and target 'U'.
# ---------------------------------------------------------------------------
# # 1. Load data: first 21 columns are features, 'U' is the target
# df1 = pd.read_csv('1.csv')
# list1_name = ['ID', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
#               'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T']
# X = df1[list1_name]
# y = df1['U']
#
# # 2. Set up trainer with parameters
# params = {
#     'learning_rate': 0.005,
#     'n_estimators': 100,
#     'min_data_in_leaf': 20,
#     'max_depth': 8,
#     'num_leaves': 100,
#     'subsample': 1,
#     'colsample_bytree': 0.8,
#     'lambda_l1': 0.1,
#     'lambda_l2': 0.2,
#     'boosting_type': 'gbdt',
#     'objective': 'regression',
#     'metric': ['huber', 'rmse'],
#     # 'best_model': 'huber',  # report the best model's value; currently disabled
#     # 'device': 'gpu',
#     # 'gpu': 4,
#     # 'gpu_platform_id': 2,
#     # 'gpu_device_id': 1,
# }
# trainer = LightGBMTrainer(params)
#
# # 3. Train the model
# model = trainer.train(X, y)
#
# # 4. Save the model
# trainer.save_model("model.txt")
# print("Training complete!")
#
# # 5. Reload the saved model and predict
# predictor = LightGBMPredictor("model.txt")
# test_data = df1[list1_name].values.tolist()
# predictions = predictor.predict(test_data)
# print("Predictions:", predictions)
#
# # For binary classification you might want probabilities instead;
# # for regression, predict() returns direct values.