import pandas as pd
import matplotlib.pyplot as plt
import copy
from typing import Optional,List
import datetime
import numpy as np
from transformers import AutoTokenizer
import torch
from structs import TraceRequest
import asyncio
# Lazily-populated cache of token ids tokenized from the text "1 2 ... 999".
# NOTE(review): the original declared `global stem_token_ids` but never
# initialized it anywhere visible, so the first call raised NameError.
stem_token_ids: Optional[List[int]] = None


def _get_input_ids(model_dir: str, num_tokens: int) -> torch.Tensor:
    """Build a deterministic CPU tensor of exactly ``num_tokens`` token ids.

    The stem ids come from tokenizing "1 2 3 ... 999" with the model's
    tokenizer; they are cached in the module-level ``stem_token_ids`` so the
    tokenizer is loaded only once, then repeated and truncated to size.

    Args:
        model_dir: Passed to ``AutoTokenizer.from_pretrained``; only used on
            the first call, before the cache is filled.
        num_tokens: Exact number of token ids to return.

    Returns:
        A 1-D ``torch.int32`` tensor of length ``num_tokens`` on the CPU.
    """
    global stem_token_ids
    if stem_token_ids is None:
        # Tokenize once and cache; later calls skip the tokenizer load.
        text = " ".join(str(i) for i in range(1, 1000))
        tokenizer = AutoTokenizer.from_pretrained(model_dir)
        stem_token_ids = tokenizer(text).input_ids
    # Shallow copy is sufficient: the list holds immutable ints.
    ids = list(stem_token_ids)
    while len(ids) < num_tokens:
        ids += ids  # double the list until it is long enough
    return torch.tensor(ids[:num_tokens], dtype=torch.int32, device="cpu")

class TraceManager:
    """Loads an Azure-style request trace CSV and analyzes / replays it.

    The CSV is expected to have at least the columns ``timestamp`` (parsed as
    datetimes), ``ContextTokens`` and ``GeneratedTokens`` — assumed from the
    column accesses below; confirm against the trace format.
    """

    # Full trace loaded by __init__; one row per request.
    trace_file: pd.DataFrame

    def __init__(self, csv_file_dir: str):
        """Read the trace CSV, parsing the ``timestamp`` column as datetimes."""
        self.trace_file = pd.read_csv(csv_file_dir, parse_dates=["timestamp"])

    def RequestRate_distribution(self, save_dir: Optional[str] = None) -> None:
        """Count requests per second, grouped into 12-hour intervals.

        Args:
            save_dir: If given, write the counts there as a CSV with columns
                ``[12h_interval, second, requests]``.
        """
        df = self.trace_file.copy()
        # Lowercase aliases: "12H"/"S" are deprecated since pandas 2.2.
        df["12h_interval"] = df["timestamp"].dt.floor("12h")
        df["second"] = df["timestamp"].dt.floor("s")
        request_per_second = (
            df.groupby(["12h_interval", "second"]).size().reset_index(name="requests")
        )
        if save_dir is not None:
            request_per_second.to_csv(save_dir, index=False)

    def generate_requests_as_trace(
        self,
        time_span: "tuple[datetime.datetime, datetime.datetime]",
        model_dir: str,
    ) -> "List[TraceRequest]":
        """Build one TraceRequest per trace row falling inside ``time_span``.

        Each request carries synthetic input ids sized to the row's
        ``ContextTokens`` and the gap in seconds to the next request. The
        last row in the span has no successor and is therefore dropped.

        Args:
            time_span: Inclusive (start, end) datetimes; start must not be
                after end.
            model_dir: Forwarded to ``_get_input_ids`` for tokenizer loading.

        Returns:
            The requests in trace order.
        """
        assert time_span[0] <= time_span[1], "[Send requests as Azure trace].Error format of time_span!"
        df = self.trace_file
        mask = (df["timestamp"] >= time_span[0]) & (df["timestamp"] <= time_span[1])
        # .copy() so the column assignment below does not write into a view
        # of trace_file (pandas SettingWithCopyWarning).
        filtered_df = df[mask].copy()
        filtered_df["next_timestamp"] = filtered_df["timestamp"].shift(-1)
        inputlist: "list[TraceRequest]" = []
        for _, row in filtered_df.iterrows():
            if pd.isna(row["next_timestamp"]):
                continue  # last row in the span: no inter-arrival gap
            time_diff = (row["next_timestamp"] - row["timestamp"]).total_seconds()
            input_ids = _get_input_ids(
                model_dir=model_dir, num_tokens=row["ContextTokens"]
            ).tolist()
            # NOTE(review): the original `TraceRequest(input_ids.row[...],...)`
            # was a `.`-for-`,` typo that raised AttributeError; assumed
            # signature TraceRequest(input_ids, prompt_len, output_len,
            # interval) -- confirm against structs.TraceRequest.
            inputlist.append(
                TraceRequest(
                    input_ids, row["ContextTokens"], row["GeneratedTokens"], time_diff
                )
            )
        return inputlist

    @staticmethod
    async def trace_request(input_requests: "List[TraceRequest]"):
        """Async-yield each request, then sleep its ``interval`` before the next.

        Declared @staticmethod: the original definition had no ``self``
        parameter, so an instance call would have bound the instance to
        ``input_requests``.
        """
        for request in input_requests:
            yield request
            # The next request is sent only after this request's interval.
            await asyncio.sleep(request.interval)