from typing import List, Tuple
import json
import random
import os, sys
import argparse
import tqdm

import pandas as pd
import numpy as np
from transformers import PreTrainedTokenizerBase, AutoTokenizer

from structs import TestRequest, Dataset,TraceRequest

# Default total (prompt + completion) token budget per request.
# NOTE(review): read_dataset rebinds TOTAL_TOKEN as a local from
# args.total_token before any use, so this module-level value appears
# to be dead — confirm before relying on or removing it.
TOTAL_TOKEN=2048
def generate_random_prompt(prompt_len: int, tokenizer) -> str:
    """Build a synthetic prompt of roughly ``prompt_len`` tokens.

    Samples ``prompt_len`` token strings uniformly (with replacement)
    from the tokenizer's vocabulary and decodes them into one string.
    Note that re-encoding the decoded text may not yield exactly
    ``prompt_len`` tokens, so the length is approximate.
    """
    # Draw random token strings from the full vocabulary.
    token_strings = list(tokenizer.get_vocab().keys())
    sampled = random.choices(token_strings, k=prompt_len)
    # Some tokenizers cannot decode raw token strings directly, so go
    # through token ids and let the tokenizer assemble the text.
    return tokenizer.decode(
        tokenizer.convert_tokens_to_ids(sampled),
        skip_special_tokens=True,
    )

def _find_files(root: str, suffix: str) -> List[str]:
    """Recursively collect paths under ``root`` whose names end with ``suffix``."""
    found: List[str] = []
    for dirpath, _dirs, filenames in os.walk(root):
        for filename in filenames:
            if filename.endswith(suffix):
                found.append(os.path.join(dirpath, filename))
    return found


def _read_sharegpt(dataset_path: str, tokenizer: PreTrainedTokenizerBase,
                   args: argparse.Namespace, total_token: int) -> Dataset:
    """Build TestRequests from a ShareGPT-style JSONL conversation dump."""
    dataset = []
    with open(dataset_path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                dataset.append(json.loads(line))

    result: List[TestRequest] = []
    for data in tqdm.tqdm(dataset):
        num_conversations = len(data["conversations"])

        # Need enough turns overall, plus at least one turn left over to
        # serve as the reference completion after the prompt turns.
        if num_conversations < args.sharegpt_min_turns or \
                num_conversations < args.sharegpt_min_prompt_turns + 1:
            continue

        # Randomly choose how many leading turns form the prompt; the
        # following turn becomes the completion.
        num_prompt_turns = random.randint(
            args.sharegpt_min_prompt_turns,
            min(num_conversations - 1, args.sharegpt_max_prompt_turns),
        )

        prompt = "\n".join(
            data["conversations"][i]["value"] for i in range(num_prompt_turns)
        )
        completion = data["conversations"][num_prompt_turns]["value"]
        prompt_len = len(tokenizer(prompt).input_ids)
        output_len = len(tokenizer(completion).input_ids)

        # Prune too short sequences.
        # NOTE(review): this prunes only when BOTH sides are short; the
        # conventional ShareGPT filter uses `or` — confirm the intent.
        if prompt_len < 4 and output_len < 4:
            continue
        if prompt_len + output_len >= total_token:
            # Prune sequences exceeding the positional-embedding budget.
            continue

        result.append(TestRequest(prompt, prompt_len, output_len))

    return Dataset("sharegpt", result)


def _read_longbench(dataset_path: str, tokenizer: PreTrainedTokenizerBase,
                    args: argparse.Namespace, total_token: int) -> Dataset:
    """Build TestRequests from LongBench .jsonl files under ``dataset_path``."""
    print(dataset_path)
    filtered_dataset: List[TestRequest] = []
    for file in tqdm.tqdm(_find_files(dataset_path, ".jsonl")):
        with open(file, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip() == "":
                    continue
                data = json.loads(line)

                # Truncate to the first 40000 chars to reduce tokenization time.
                context = data["context"][:40000]
                context_token_ids = tokenizer(context).input_ids
                answer_len = len(tokenizer(data["answers"][0]).input_ids)

                # Clamp to >= 0: a negative budget would slice from the
                # END of the list and keep an over-long prompt.
                context_len_allowed = max(0, min(
                    total_token - answer_len,
                    random.randint(args.longbench_min_prompt_len,
                                   args.longbench_max_prompt_len),
                ))
                context_token_ids = context_token_ids[:context_len_allowed]

                filtered_dataset.append(TestRequest(
                    tokenizer.decode(context_token_ids),
                    len(context_token_ids),
                    answer_len,
                ))

    return Dataset("longbench", filtered_dataset)


def _read_burstgpt(dataset_path: str, tokenizer: PreTrainedTokenizerBase,
                   args: argparse.Namespace, total_token: int) -> Dataset:
    """Build TraceRequests from BurstGPT CSV traces within a time window."""
    start_time = args.burst_gpt_start_time
    end_time = args.burst_gpt_end_time

    trace_list: List[TraceRequest] = []
    for file in tqdm.tqdm(_find_files(dataset_path, ".csv")):
        df = pd.read_csv(file)

        # Keep only requests in the window that fit the token budget,
        # ordered by arrival time.
        df = df[(df['Timestamp'] >= start_time) & (df['Timestamp'] <= end_time)]
        df = df[df['Total tokens'] < total_token]
        df = df.sort_values(by='Timestamp').reset_index(drop=True)

        timestamps = df['Timestamp'].tolist()
        for i, row in df.iterrows():
            prompt_len = int(row['Request tokens'])
            output_len = int(row['Response tokens'])
            # Inter-arrival gap to the next request; 0 for the last one.
            interval = timestamps[i + 1] - row['Timestamp'] if i < len(df) - 1 else 0

            trace_list.append(TraceRequest(
                prompt=generate_random_prompt(prompt_len, tokenizer),
                prompt_len=prompt_len,
                output_len=output_len,
                interval=interval,
            ))

    return Dataset("burstgpt", trace_list)


def _read_math500(dataset_path: str, tokenizer: PreTrainedTokenizerBase,
                  args: argparse.Namespace, total_token: int) -> Dataset:
    """Build TestRequests from MATH-500 .jsonl files under ``dataset_path``."""
    filtered_dataset: List[TestRequest] = []
    for file in tqdm.tqdm(_find_files(dataset_path, ".jsonl")):
        with open(file, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip() == "":
                    continue
                data = json.loads(line)

                # Truncate to the first 40000 chars to reduce tokenization time.
                context = data["problem"][:40000]
                context_token_ids = tokenizer(context).input_ids
                answer_len = len(tokenizer(data["solution"] + data["answer"]).input_ids)

                # Clamp to >= 0: a negative budget would slice from the
                # END of the list and keep an over-long prompt.
                context_len_allowed = max(0, min(
                    total_token - answer_len,
                    random.randint(args.math500_min_prompt_len,
                                   args.math500_max_prompt_len),
                ))
                context_token_ids = context_token_ids[:context_len_allowed]

                filtered_dataset.append(TestRequest(
                    tokenizer.decode(context_token_ids),
                    len(context_token_ids),
                    answer_len,
                ))

    return Dataset("math500", filtered_dataset)


def read_dataset(
    dataset_path: str,
    tokenizer: PreTrainedTokenizerBase,
    name: str,
    args: argparse.Namespace,
) -> Dataset:
    """Read the given dataset and return a Dataset of requests.

    Args:
        dataset_path: JSONL file (sharegpt) or directory root scanned
            recursively (longbench, burstgpt, math-500).
        tokenizer: Tokenizer used to measure prompt/completion lengths.
        name: One of "sharegpt", "longbench", "burstgpt", "math-500"
            (case-insensitive).
        args: Parsed CLI namespace with per-dataset filter options and
            the total token budget (``args.total_token``).

    Returns:
        A Dataset of TestRequest entries (TraceRequest for burstgpt).

    Raises:
        ValueError: If ``name`` is not a supported dataset.
    """
    total_token = args.total_token
    key = name.lower()
    if key == "sharegpt":
        return _read_sharegpt(dataset_path, tokenizer, args, total_token)
    elif key == "longbench":
        return _read_longbench(dataset_path, tokenizer, args, total_token)
    elif key == "burstgpt":
        return _read_burstgpt(dataset_path, tokenizer, args, total_token)
    elif key == "math-500":
        return _read_math500(dataset_path, tokenizer, args, total_token)
    else:
        raise ValueError(f"Unsupported dataset name: {name}")

def build_arg_parser() -> argparse.ArgumentParser:
    """Create the CLI parser for the dataset preprocessing script."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str, default="sharegpt")
    parser.add_argument("--dataset-path", type=str, required=True)
    parser.add_argument("--tokenizer", type=str, required=True)
    parser.add_argument("--trust-remote-code", action="store_true")
    parser.add_argument("--output-path", type=str, required=True)
    parser.add_argument("--seed", type=int, default=0)

    # ShareGPT turn-count filters.
    parser.add_argument("--sharegpt-min-turns", type=int, default=3)
    parser.add_argument("--sharegpt-min-prompt-turns", type=int, default=1)
    parser.add_argument("--sharegpt-max-prompt-turns", type=int, default=1000)

    # LongBench prompt-length bounds (in tokens).
    parser.add_argument("--longbench-min-prompt-len", type=int, default=1900)
    parser.add_argument("--longbench-max-prompt-len", type=int, default=2048)

    # MATH-500 prompt-length bounds (in tokens).
    parser.add_argument("--math500-min-prompt-len", type=int, default=1900)
    parser.add_argument("--math500-max-prompt-len", type=int, default=2048)

    # BurstGPT trace time window — presumably trace timestamp units; confirm.
    parser.add_argument("--burst-gpt-start-time", type=int, default=40000)
    parser.add_argument("--burst-gpt-end-time", type=int, default=41000)

    # Total (prompt + completion) token budget per request.
    parser.add_argument("--total-token", type=int, default=2048)
    return parser


def main() -> None:
    """Parse CLI arguments, read the dataset, and dump it to the output path."""
    args = build_arg_parser().parse_args()

    # Seed both RNGs so prompt sampling and filtering are reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)

    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer, trust_remote_code=args.trust_remote_code
    )
    dataset = read_dataset(args.dataset_path, tokenizer, args.dataset, args)
    print(f"Loaded {len(dataset.reqs)} TestRequests from dataset {args.dataset_path}")
    dataset.dump(args.output_path)
    print(f"Saved to {args.output_path}")


if __name__ == "__main__":
    main()