'''
The Predictor computes operator timing formulas from the data collected by the profiler,
and serves as a module for the llm_engine to predict TTFT (Time To First Token) and TPOT (Time Per Output Token).

The formulas used by the Predictor are derived from the profiling data and follow the methodology described in the accompanying paper.
'''
from sklearn.linear_model import LinearRegression
from typing import Dict,List
from abc import abstractmethod,ABC
import pandas as pd
import numpy as np
import ray

from mixserve.utils import Prompt_struct
@ray.remote
def remote_train_linear_model(db_path: str, table: str, x_cols: List[str], y_col: str, poly_degree: int = 1):
    """Fit a LinearRegression on profiling rows read from a SQLite table.

    Executed as a Ray task so the DB read and model fit run off the caller's
    process. Returns a dict with keys "model" (fitted LinearRegression),
    "poly" (PolynomialFeatures or None) and "valid_indices" (indices of the
    expanded feature columns kept for fitting, or None), or None when the
    table holds no rows.
    """
    # Imports are deliberately function-local so they resolve on the Ray
    # worker process instead of being captured at module import time.
    # (NOTE(review): `numpy` is imported here but not used in this body.)
    import os
    import sqlite3
    import pandas as pd
    import numpy as np
    from sklearn.linear_model import LinearRegression
    from sklearn.preprocessing import PolynomialFeatures
    from importlib.resources import files

    # The benchmark DB lives inside the installed `mixserve` package tree.
    path = os.path.join(files('mixserve'), db_path)
    conn = sqlite3.connect(path)
    # NOTE(review): table/column identifiers are interpolated into the SQL
    # string; this is safe only while callers pass hard-coded names (all
    # current callers in this file do).
    df = pd.read_sql_query(f"SELECT {', '.join(x_cols + [y_col])} FROM {table}", conn)
    conn.close()

    if df.empty:
        return None

    X = df[x_cols]
    y = df[y_col]

    poly = None
    valid_indices = None

    if poly_degree > 1:
        # interaction_only=True: only products of distinct features, no powers.
        poly = PolynomialFeatures(degree=poly_degree, include_bias=False, interaction_only=True)
        X_poly_all = poly.fit_transform(X)
        feature_names_all = poly.get_feature_names_out(x_cols)

        # Whitelist of terms used by the decode-attention cost model; any
        # other expanded feature is dropped before fitting.
        valid_names = {
        "input_length",
        "batchsize",
        "input_length batchsize",
        "current_length batchsize"
            }

        # Keep enumeration order: TPOT prediction later slices transformed
        # inputs with these same indices, so ordering must remain stable.
        valid_indices = [
            i for i, name in enumerate(feature_names_all)
            if name in valid_names
        ]

        X = X_poly_all[:, valid_indices]

    model = LinearRegression()
    model.fit(X, y)

    # The sklearn objects are pickled back to the caller by Ray.
    return {
        "model": model,
        "poly": poly,
        "valid_indices": valid_indices
    }

   
class Predictor(ABC):
    """Base class for TTFT/TPOT predictors.

    Holds per-GPU fitted timing formulas for the attention, GEMM and
    (optionally) MoE operator groups, keyed by logical GPU id.
    """

    # Per-GPU attention-time formulas (value type differs per subclass).
    attention_formulae: Dict
    # Per-GPU linear models for the dense GEMM portion.
    GEMM_formulae: Dict[int, LinearRegression]
    # Per-GPU linear models for the MoE portion; populated only when Moe=True.
    MoE_formulae: Dict[int, LinearRegression]
    # Initialized False here; presumably flipped by callers once all
    # formulas are trained — not set elsewhere in this module.
    finish_modeling: bool
    # Whether the served model contains MoE layers.
    Moe: bool

    def __init__(self, Moe: bool):
        self.attention_formulae = {}
        self.GEMM_formulae = {}
        self.MoE_formulae = {}
        self.finish_modeling = False
        self.Moe = Moe

    @abstractmethod
    async def train_formula_attention(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit the attention-operator timing formula for one GPU."""
        raise NotImplementedError

    @abstractmethod
    async def train_formula_gemm(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit the GEMM-operator timing formula for one GPU."""
        raise NotImplementedError

    @abstractmethod
    async def train_formula_moe(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit the MoE-operator timing formula for one GPU."""
        raise NotImplementedError

class TTFT_predictor(Predictor):
    """Predicts TTFT (prefill latency, in seconds) from profiled operator formulas.

    Every prefill operator cost is modeled as a linear function of the number
    of input tokens processed in a chunk.
    """

    def __init__(self, Moe: bool):
        super().__init__(Moe=Moe)

    async def _train_prefill_model(self, gpu_id: int, tp_size: int, pp_size: int, table: str):
        # Shared one-feature fit for all three prefill operators; in the
        # profiling DB the y column carries the same name as its table.
        db_path = f"benchdb/test_device{gpu_id}_tp{tp_size}_pp{pp_size}.db"
        return await remote_train_linear_model.remote(
            db_path=db_path,
            table=table,
            x_cols=['input_tokens'],
            y_col=table
        )

    @staticmethod
    def _describe(model) -> str:
        # Human-readable "a * input_tokens + b" rendering of a fitted model.
        terms = [f"{coef:.4f} * {name}" for coef, name in zip(model.coef_, ['input_tokens'])]
        return " + ".join(terms) + f" + {model.intercept_:.4f}"

    async def train_formula_attention(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit and store the prefill-attention timing formula for one GPU."""
        result = await self._train_prefill_model(logical_gpu_id, tp_size, pp_size, 'prefill_attention')
        if result:
            model = result["model"]
            print("prefill attention formulae：")
            print(f"y = {self._describe(model)}")
            self.attention_formulae[logical_gpu_id] = model

    async def train_formula_gemm(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit and store the prefill-GEMM timing formula for one GPU."""
        result = await self._train_prefill_model(logical_gpu_id, tp_size, pp_size, 'prefill_gemm')
        if result:
            model = result["model"]
            print("prefill gemm formulae：")
            print(f"y = {self._describe(model)}")
            self.GEMM_formulae[logical_gpu_id] = model

    async def train_formula_moe(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit and store the prefill-MoE timing formula for one GPU (MoE models only)."""
        if not self.Moe:
            return
        result = await self._train_prefill_model(logical_gpu_id, tp_size, pp_size, 'prefill_moe')
        if result:
            self.MoE_formulae[logical_gpu_id] = result['model']

    def predict_TTFT(self, gpu_ids: list[list[int]], token_nums: np.ndarray) -> float:
        '''predict TTFT

        TTFT contains calculation time and waiting time. token_nums is the number of groups obtained by dividing the current token number
        to be processed by the chunksize. The size of token_nums is (⌈token_num/chunksize⌉, min(chunksize, token_num in the batch)).

        gpu_ids: one entry per pipeline stage; only the first GPU id of each
        stage is used for formula lookup (stage GPUs assumed identical —
        TODO confirm with callers).
        '''
        # Build the regression input once (loop-invariant): one row per chunk.
        # reshape(-1, 1) also fixes the previous construction, which placed a
        # whole ndarray into a single object-dtype cell whenever token_nums
        # was an array rather than a scalar.
        X_token = pd.DataFrame(
            np.asarray(token_nums).reshape(-1, 1),
            columns=["input_tokens"],
        )
        TTFT: float = 0.0
        for pp_idx in gpu_ids:
            gpu_id = pp_idx[0]
            attn_formula = self.attention_formulae.get(gpu_id)
            GEMM_formula = self.GEMM_formulae.get(gpu_id)
            assert attn_formula is not None and GEMM_formula is not None, f"Error!GPU {gpu_id} hasn't been profiled!"
            attn_duration = attn_formula.predict(X_token)
            GEMM_duration = GEMM_formula.predict(X_token)
            TTFT += np.sum(attn_duration + GEMM_duration)
            if self.Moe:
                MoE_formula = self.MoE_formulae.get(gpu_id)
                assert MoE_formula is not None, f"Error!GPU {gpu_id} moe part hasn't been profiled"
                TTFT += np.sum(MoE_formula.predict(X_token))

        # Profiled durations are in microseconds; 1e-6 converts to seconds.
        # (The previous literal 10e-6 equals 1e-5 — a 10x overestimate.)
        return float(TTFT * 1e-6)
    
class TPOT_predictor(Predictor):
    """Predicts TPOT (per-decode-step latency, in seconds) from profiled formulas.

    Decode attention cost depends on sequence lengths and batch size (with
    interaction terms); decode GEMM and MoE costs are linear in batch size.
    """

    def __init__(self, Moe: bool):
        super().__init__(Moe=Moe)

    async def _train_batch_model(self, gpu_id: int, tp_size: int, pp_size: int, table: str):
        # Shared batchsize-linear fit for the decode GEMM/MoE operators; the
        # y column carries the same name as its table in the profiling DB.
        db_path = f"benchdb/test_device{gpu_id}_tp{tp_size}_pp{pp_size}.db"
        return await remote_train_linear_model.remote(
            db_path=db_path,
            table=table,
            x_cols=['batchsize'],
            y_col=table
        )

    async def train_formula_attention(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit the decode-attention formula (degree-2 interaction features) for one GPU.

        Stores the full Ray result dict (model + PolynomialFeatures +
        selected-column indices) because prediction must re-apply the exact
        same feature expansion and column selection.
        """
        gpu_id = logical_gpu_id
        db_path = f"benchdb/test_device{gpu_id}_tp{tp_size}_pp{pp_size}.db"
        x_cols = ['input_length', 'current_length', 'batchsize']
        result = await remote_train_linear_model.remote(
            db_path=db_path,
            table='decode_attention',
            x_cols=x_cols,
            y_col='decode_attention',
            poly_degree=2
        )
        if result:
            model = result["model"]
            poly = result["poly"]
            valid_indices = result["valid_indices"]
            # Recover the names of the columns the model was actually fit on.
            feature_names = poly.get_feature_names_out(x_cols) if poly else x_cols
            filtered_names = [feature_names[i] for i in valid_indices] if valid_indices else x_cols
            terms = [f"{coef:.4f} * {name}" for coef, name in zip(model.coef_, filtered_names)]
            formula = " + ".join(terms) + f" + {model.intercept_:.4f}"
            print("decode attention formulae：")
            print(f"y = {formula}")
            self.attention_formulae[gpu_id] = result

    async def train_formula_gemm(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit and store the decode-GEMM timing formula for one GPU."""
        result = await self._train_batch_model(logical_gpu_id, tp_size, pp_size, 'decode_gemm')
        if result:
            model = result["model"]
            terms = [f"{coef:.4f} * {name}" for coef, name in zip(model.coef_, ['batchsize'])]
            formula = " + ".join(terms) + f" + {model.intercept_:.4f}"
            print("decode gemm formulae：")
            print(f"y = {formula}")
            self.GEMM_formulae[logical_gpu_id] = model

    async def train_formula_moe(self, logical_gpu_id: int, tp_size: int = 1, pp_size: int = 1):
        """Fit and store the decode-MoE timing formula for one GPU (MoE models only)."""
        if not self.Moe:
            return
        result = await self._train_batch_model(logical_gpu_id, tp_size, pp_size, 'decode_moe')
        if result:
            self.MoE_formulae[logical_gpu_id] = result['model']

    def predict_TPOT(self, gpu_ids: list[list[int]], requests: List[List[Prompt_struct]]) -> float:
        '''
        Predict the time of one decode step summed across pipeline stages.

        gpu_ids: one entry per pipeline stage; only the first GPU id of each
        stage is used for formula lookup.
        requests: requests[i] is the batch currently resident on stage i.
        '''
        TPOT: float = 0.0
        for stage_idx, pp_layer in enumerate(gpu_ids):
            batch = requests[stage_idx]
            batchsize = len(batch)
            if batchsize == 0:
                # An idle stage contributes no latency.
                continue
            # Batch-average lengths stand in for the per-request values the
            # attention formula was profiled on.
            input_avg = sum(r.input_length for r in batch) / batchsize
            current_avg = sum(r.current_length for r in batch) / batchsize
            gpu_id = pp_layer[0]
            attn_formula = self.attention_formulae.get(gpu_id)
            GEMM_formula = self.GEMM_formulae.get(gpu_id)
            assert attn_formula is not None and GEMM_formula is not None, f"Error!GPU {pp_layer[0]} hasn't been profiled!"
            X_raw_df = pd.DataFrame(
                [[input_avg, current_avg, batchsize]],
                columns=["input_length", "current_length", "batchsize"]
            )
            # Re-apply the training-time polynomial expansion and column
            # selection before predicting.
            X_poly = attn_formula["poly"].transform(X_raw_df)
            X_filtered = X_poly[:, attn_formula["valid_indices"]]
            attn_duration = attn_formula["model"].predict(X_filtered)

            # One DataFrame serves both batchsize-linear formulas.
            X_batch_df = pd.DataFrame([[batchsize]], columns=["batchsize"])
            GEMM_duration = GEMM_formula.predict(X_batch_df)
            TPOT += np.sum(attn_duration + GEMM_duration)

            if self.Moe:
                MoE_formula = self.MoE_formulae.get(gpu_id)
                assert MoE_formula is not None, f"Error!GPU {pp_layer[0]} Moe hasn't been profiled"
                TPOT += np.sum(MoE_formula.predict(X_batch_df))

        # Profiled durations are in microseconds; 1e-6 converts to seconds.
        # (The previous literal 10e-6 equals 1e-5 — a 10x overestimate.)
        return float(TPOT * 1e-6)
 