import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from pathlib import Path
from typing import Optional, Tuple, Union, Sequence
import os
import pickle
import pandas as pd
import numpy as np
from collections import defaultdict
from util import *
from concurrent.futures import ProcessPoolExecutor, as_completed
import os

class Predicator:
    """Facade wiring the trace-based microservice counter to an LSTM model.

    Parameters
    ----------
    trace_file_path : str | None
        Path to the trace CSV. When omitted, falls back to the project's
        default test trace under ``data_process/dealed_data``.
    """

    def __init__(self, trace_file_path=None):
        # `is None` is the correct identity check; `== None` calls __eq__
        # and can misbehave for objects that override equality.
        if trace_file_path is None:
            self.trace_file_path = get_root_dir() + "/data_process/dealed_data/test_trace.csv"
        else:
            self.trace_file_path = trace_file_path

        self.ms_number_acq = MicroserviceNumberAcquirer(self.trace_file_path)
        # NOTE(review): 128 is LSTMNetwork's *input_dim* (hidden_dim keeps its
        # default of 128) — confirm this matches the intended feature size.
        self.lstm = LSTMNetwork(128)
     
     
        
class MicroserviceNumberAcquirer():
    """Computes, per microservice (``dm``), the number of concurrently
    running request instances at every integer timestamp of a trace file.

    The trace CSV must provide at least the columns ``timestamp`` (request
    start time), ``dm`` (microservice name) and ``rt`` (response time).
    """

    def __init__(self, trace_file_path=None):
        # NOTE(review): passing None makes the concatenation below raise
        # TypeError; the only visible caller (Predicator) always supplies a path.
        self.trace_file_path = trace_file_path
        self.base_file_path = self.trace_file_path + ".base"
        # The call-graph / running-number bookkeeping below is currently
        # disabled; it depends on a pickled ".base" companion file.
        # with open(self.base_file_path, "rb") as file:
        #     self.service_to_callgraph_dict=pickle.load(file)
        # self.service_name_list=list(self.service_to_callgraph_dict.keys())
        # self.ms_kind_list=[]
        # for service_name in self.service_name_list:
        #     request=self.service_to_callgraph_dict[service_name]
        #     self.ms_kind_list.extend(request.ms_kind_list)
        # self.ms_kind_list.sort()
        # self.ms_kind_running_num=[0]*len(self.ms_kind_list)   # one-to-one with ms_kind_list
        #
        # self.trace_index=0
        # self.time=0
        # self.trace_info_end_ms=[]
        # self.all_trace_info=[]
        # window_size=129
        # for _ in range(window_size):
        #     self.all_trace_info.append(self.ms_kind_running_num.copy())

    # ---------------- function executed by worker processes ---------------- #
    def _calc_counts_for_dm(self, args):
        """Count running instances of a single ``dm`` over the global window.

        Parameters
        ----------
        args : tuple
            ``(dm_name, sub_df, min_ts, max_ts)``; ``sub_df`` holds the
            ``timestamp``/``rt`` rows belonging to ``dm_name`` and
            ``[min_ts, max_ts]`` is the global integer sampling interval.

        Returns
        -------
        tuple
            ``(dm_name, counts)`` where ``counts[i]`` is the number of
            requests with ``start <= min_ts + i < end``, using
            ``start = floor(timestamp)`` and ``end = ceil(timestamp + rt)``.
        """
        dm_name, sub_df, min_ts, max_ts = args
        # Log only the dm name: printing `args` dumped the entire
        # sub-DataFrame for every task, flooding stdout.
        print(f"start: {dm_name}")

        size = max_ts - min_ts + 1  # one sample per integer time step

        # Each request occupies the half-open interval [start, end),
        # expressed as offsets into the sampling window.
        starts = np.floor(sub_df["timestamp"].values).astype(np.int64) - min_ts
        ends = np.ceil(sub_df["timestamp"].values + sub_df["rt"].values).astype(np.int64) - min_ts

        # Difference array + prefix sum: +1 at each start, -1 at each end,
        # then cumulative sum. Replaces the former per-second Python sweep
        # loop with vectorized NumPy; np.add.at accumulates duplicate
        # indices correctly, and the clip keeps stray out-of-window events
        # harmless (delta[size] is never included in the cumsum).
        delta = np.zeros(size + 1, dtype=np.int64)
        np.add.at(delta, np.clip(starts, 0, size), 1)
        np.add.at(delta, np.clip(ends, 0, size), -1)
        counts = np.cumsum(delta[:size]).astype(np.int32)

        print(f"end: {dm_name}")
        return dm_name, counts.tolist()

    # ---------------- main workflow ---------------- #
    def get_ms_kind_running_num_by_file(self, n_workers=None):
        """Fan the per-``dm`` counting out over a process pool.

        Parameters
        ----------
        n_workers : int | None
            Pool size; defaults to ``os.cpu_count()``.

        Returns
        -------
        tuple
            ``(result_dict, ts_range)`` where ``result_dict`` maps
            ``dm_name -> [count0, count1, ...]`` and ``ts_range`` is the
            numpy array of time coordinates the counts correspond to.
        """
        df = pd.read_csv(self.trace_file_path, usecols=["timestamp", "dm", "rt"])
        df["timestamp"] = df["timestamp"].astype(float)
        df["rt"] = df["rt"].astype(float)
        df["dm"] = df["dm"].astype(str)

        # Global sampling window shared by every worker.
        min_ts = int(np.floor(df["timestamp"].min()))
        max_ts = int(np.ceil((df["timestamp"] + df["rt"]).max()))
        ts_range = np.arange(min_ts, max_ts + 1)

        # Pre-build one small, picklable task per dm for the process pool.
        tasks = [
            (dm, sub_df[["timestamp", "rt"]].copy(), min_ts, max_ts)
            for dm, sub_df in df.groupby("dm")
        ]

        result = {}
        workers = n_workers or os.cpu_count() or 1
        with ProcessPoolExecutor(max_workers=workers) as executor:
            futures = [executor.submit(self._calc_counts_for_dm, t) for t in tasks]
            for fut in as_completed(futures):
                dm_name, counts = fut.result()
                result[dm_name] = counts

        return result, ts_range

    def get_next_ms_kind_running_num_by_input(self):
        # TODO(review): unfinished — self.ms_kind_running_num is only created
        # by the commented-out __init__ bookkeeping, so calling this now
        # raises AttributeError.
        return self.ms_kind_running_num.copy()

    def record_trace_info(self, trace_info):
        # trace_info is a list of tuples (ms_kind, start_time, end_time);
        # the duration is measured from "now".
        # TODO(review): unfinished stub — the unpacking below only matches
        # 1-tuples and the loop body has no effect.
        for (ms_kind, ) in trace_info:
            self.trace_info_end_ms
        
        
        
        
class LSTMNetwork(nn.Module):
    """
    A reusable LSTM network that supports both training and inference.

    Key features
    ------------
    • Works for sequence-to-label tasks (classification or regression).
    • Minimal boilerplate: fit(), evaluate(), predict(), save(), load().
    • Handles GPU/CPU transparently.
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int = 128,
        output_dim: int = 1,
        num_layers: int = 2,
        dropout: float = 0.2,
        bidirectional: bool = False,
    ):
        super().__init__()

        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        directions = 2 if bidirectional else 1

        self.lstm = nn.LSTM(
            input_dim,
            hidden_dim,
            num_layers=num_layers,
            # nn.LSTM warns when dropout is set with a single layer.
            dropout=dropout if num_layers > 1 else 0.0,
            batch_first=True,
            bidirectional=bidirectional,
        )
        # Bidirectional LSTMs concatenate forward/backward hidden states.
        self.fc = nn.Linear(hidden_dim * directions, output_dim)

    # ------------------------------------------------------------------ #
    # Forward & helper methods
    # ------------------------------------------------------------------ #
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        x: (batch, seq_len, input_dim)
        Returns: (batch, output_dim)
        """
        # h_0/c_0 auto-initialized to zeros if not provided
        output, _ = self.lstm(x)
        # Use the last time step's output as the sequence summary.
        last_output = output[:, -1, :]
        return self.fc(last_output)

    # ------------------------------------------------------------------ #
    # Training utilities
    # ------------------------------------------------------------------ #
    def fit(
        self,
        train_data: Union[TensorDataset, DataLoader, Tuple[torch.Tensor, torch.Tensor]],
        val_data: Optional[
            Union[TensorDataset, DataLoader, Tuple[torch.Tensor, torch.Tensor]]
        ] = None,
        epochs: int = 20,
        batch_size: int = 64,
        lr: float = 1e-3,
        weight_decay: float = 0.0,
        loss_fn: Optional[nn.Module] = None,
        device: Optional[torch.device] = None,
        verbose: bool = True,
    ):
        """
        Convenience wrapper around a standard training loop.

        Parameters
        ----------
        train_data / val_data : DataLoader, TensorDataset, or (X, y) tuple.
        loss_fn : defaults to MSELoss for output_dim == 1, else CrossEntropyLoss.
        device : defaults to CUDA when available, otherwise CPU.
        """
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.to(device)

        # Normalize the accepted input flavors into DataLoaders.
        def _to_loader(
            ds: Union[TensorDataset, DataLoader, Tuple[torch.Tensor, torch.Tensor]]
        ) -> DataLoader:
            if isinstance(ds, DataLoader):
                return ds
            if isinstance(ds, tuple):
                ds = TensorDataset(*ds)
            return DataLoader(ds, batch_size=batch_size, shuffle=True)

        train_loader = _to_loader(train_data)
        val_loader = _to_loader(val_data) if val_data is not None else None

        # Optimizer / loss
        optimizer = torch.optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)
        if loss_fn is None:
            # Auto-select MSE for regression (output_dim==1) else cross-entropy
            loss_fn = (
                nn.MSELoss()
                if self.fc.out_features == 1
                else nn.CrossEntropyLoss()
            )

        for ep in range(1, epochs + 1):
            self.train()
            epoch_loss = 0.0
            for xb, yb in train_loader:
                xb, yb = xb.to(device), yb.to(device)
                pred = self(xb)
                # squeeze(-1) only drops the trailing feature dim; a bare
                # squeeze() would also collapse the batch dim when the last
                # batch contains a single sample, breaking the loss shapes.
                loss = loss_fn(pred.squeeze(-1), yb)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Re-weight by batch size so partial batches average correctly.
                epoch_loss += loss.item() * xb.size(0)

            epoch_loss /= len(train_loader.dataset)

            # Validation phase
            if val_loader:
                self.eval()
                val_loss = 0.0
                with torch.no_grad():
                    for xb, yb in val_loader:
                        xb, yb = xb.to(device), yb.to(device)
                        pred = self(xb)
                        val_loss += loss_fn(pred.squeeze(-1), yb).item() * xb.size(0)
                val_loss /= len(val_loader.dataset)
                if verbose:
                    print(f"Epoch {ep:02d}/{epochs} ┃ train loss={epoch_loss:.4f} ┃ val loss={val_loss:.4f}")
            elif verbose:
                print(f"Epoch {ep:02d}/{epochs} ┃ train loss={epoch_loss:.4f}")

    # ------------------------------------------------------------------ #
    # Inference
    # ------------------------------------------------------------------ #
    @torch.no_grad()
    def predict(
        self,
        x: Union[torch.Tensor, Sequence[torch.Tensor]],
        device: Optional[torch.device] = None,
        batch_size: int = 128,
    ) -> torch.Tensor:
        """
        x: Tensor or list/array of sequences, each shaped (seq_len, input_dim)
        Returns predictions as a tensor on CPU.
        """
        if isinstance(x, torch.Tensor):
            dataset = TensorDataset(x)
        else:
            # Sequences must share seq_len for stack() to succeed.
            x = torch.stack(list(x))
            dataset = TensorDataset(x)

        loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.to(device)
        self.eval()

        preds = []
        for (xb,) in loader:
            xb = xb.to(device)
            out = self(xb)
            preds.append(out.cpu())
        return torch.cat(preds, dim=0)

    # ------------------------------------------------------------------ #
    # Persistence helpers
    # ------------------------------------------------------------------ #
    def save(self, path: Union[str, Path]):
        """
        Save entire model (architecture + weights). Use load() to reload.
        """
        torch.save(self, Path(path))

    @staticmethod
    def load(path: Union[str, Path], device: Optional[torch.device] = None):
        """
        Load a model saved with save().

        NOTE: save() pickles the full module, so weights_only=False is
        required on torch >= 2.6 (where the default changed to True).
        Pickle deserialization executes arbitrary code — only load files
        from trusted sources.
        """
        if device is None:
            device = torch.device("cpu")
        return torch.load(Path(path), map_location=device, weights_only=False)



if __name__ == "__main__":
    # Build the predictor against the default test trace and report the
    # per-microservice counting result.
    predictor = Predicator()
    counts_by_dm, ts_axis = predictor.ms_number_acq.get_ms_kind_running_num_by_file(32)
    print(f"ms number:{len(counts_by_dm.keys())}")
    print(f"time range{ts_axis}")

    # --- LSTM usage demo (disabled) ---------------------------------- #
    # Dummy dataset: sequence length = 10, input_dim = 5
    # torch.manual_seed(0)
    # X = torch.randn(1000, 10, 5)
    # y = torch.randn(1000)  # regression example

    # model = LSTMNetwork(input_dim=5, hidden_dim=64, output_dim=1)
    # model.fit((X, y), epochs=5, batch_size=32)

    # pred = model.predict(X[:3])
    # print("Sample predictions:", pred.squeeze().tolist())

    # Save & reload round-trip
    # model.save("demo_lstm.pt")
    # reloaded = LSTMNetwork.load("demo_lstm.pt")
    # print("Reloaded prediction:", reloaded.predict(X[:3]).squeeze().tolist())
