import os
import re
import tqdm
import json
import gzip
import itertools


import numpy as np

from enum         import Enum
from collections  import defaultdict, Counter
from typing       import List, Union, Iterable, Dict
from datasets     import load_dataset, load_from_disk

class PromptType(Enum):
    """Prompt templates understood by `coding_prompt`."""
    INSTRUCT_PROMPT      = 1
    STANDARD_PROMPT      = 2
    WRITE_PROMPT         = 3
    REPLIT_GLAIVE_PROMPT = 4
    # Historical misspelling kept so existing callers keep working.
    UNSUPROTED_PROMPT    = 5
    # Correctly-spelled alias: same value, so Enum treats it as an alias of
    # UNSUPROTED_PROMPT rather than a new member.
    UNSUPPORTED_PROMPT   = 5


def stream_jsonl(filename: str) -> Iterable[Dict]:
    """
    Lazily yield one dictionary per non-blank line of a JSONL file.

    A ``.gz`` suffix selects transparent gzip decompression; anything else is
    read as plain text.
    """
    opener = gzip.open if filename.endswith(".gz") else open
    with opener(filename, "rt") as fp:
        for line in fp:
            # Skip whitespace-only lines rather than failing on json.loads("").
            if line.strip():
                yield json.loads(line)


def write_jsonl(filename: str, data: Iterable[Dict], append: bool = False):
    """
    Write every dictionary in `data` as one JSON line of `filename`.

    A ``.gz`` suffix produces a gzip-compressed file.  With ``append=True``
    new records are added after any existing content instead of replacing it.
    """
    mode = "ab" if append else "wb"
    path = os.path.expanduser(filename)
    if path.endswith(".gz"):
        # Open the raw file in the requested mode and layer a fresh gzip
        # member on top of it (appending concatenates gzip members).
        with open(path, mode) as raw, gzip.GzipFile(fileobj=raw, mode="wb") as out:
            for record in data:
                out.write((json.dumps(record) + "\n").encode("utf-8"))
    else:
        with open(path, mode) as out:
            for record in data:
                out.write((json.dumps(record) + "\n").encode("utf-8"))


def read_problems(path_prefix, file_name="openai_humaneval/HumanEval.jsonl.gz") -> Dict[str, Dict]:
    """Load the HumanEval problem set and index it by ``task_id``."""
    problems: Dict[str, Dict] = {}
    for task in stream_jsonl(os.path.join(path_prefix, file_name)):
        problems[task["task_id"]] = task
    return problems


def load_dataset(dataset, path_prefix):
    """
    Load an evaluation dataset, preferring a previously-saved local copy.

    Args:
        dataset: dataset name (the sub-directory under `path_prefix`, also
            usable as a Hugging Face Hub identifier).
        path_prefix: local root directory searched first.

    Returns:
        The 'test' split — a `datasets.Dataset`; iterating it yields one dict
        per row.
    """
    # BUG FIX: this function shadows the `load_dataset` imported from
    # `datasets` at the top of the file, so the original fallback call
    # `load_dataset(data_path)` recursed into itself forever.  Re-import the
    # Hub loader under an alias to reach the real implementation.
    from datasets import load_dataset as hf_load_dataset

    data_path = os.path.join(path_prefix, dataset)
    if os.path.exists(data_path):
        data = load_from_disk(data_path)
    else:
        data = hf_load_dataset(data_path)

    # features: ['task_id', 'prompt', 'canonical_solution', 'test', 'entry_point'],
    # num_rows: 164
    return data['test']


def coding_prompt(prompt, type: Union[int, PromptType]):
    """
    Build the (Chinese-language) prompt text for the requested template.

    Args:
        prompt: the problem statement to embed in the template.
        type: a `PromptType` member or its integer value.

    Returns:
        The fully formatted prompt string.

    Raises:
        ValueError: if `type` is not a valid `PromptType`, or is the
            explicitly unsupported member.
    """
    type = PromptType(type)  # raises ValueError for unknown integer values
    # The original used `assert`, which is silently stripped under `python -O`;
    # validate with an explicit raise instead.
    if type == PromptType.UNSUPROTED_PROMPT:
        raise ValueError("不支持的prompt类型!")

    if type == PromptType.INSTRUCT_PROMPT:
        return f"下面是描述任务的一个指令，根据该指令写出合适的返回结果."       + \
            f"\n\n### 指令：\n无需给出任何测试代码和解释内容，完成如下问题直接给出Python代码："  + \
            f"\n{prompt}\n\n### 回答："
    elif type == PromptType.STANDARD_PROMPT:
        return f"无需任何测试和解释，完成如下问题的Python代码：\n{prompt}"
    elif type == PromptType.WRITE_PROMPT:
        return f"编写一个Python程序完成如下问题： {prompt}"
    else:
        # REPLIT_GLAIVE_PROMPT: instruction + input style template.
        return f"下面是描述任务的一个指令，并配合一个输入以提供更多的上下文信息。" + \
            f"\n根据该指令写出合适的返回结果."                               + \
            f"\n\n ### 指令：\n编写一个程序完成给定的问题。"                  + \
            f"\n\n 输入：\n{prompt}\n\n### 回答：" 
    

#! https://github.com/declare-lab/instruct-eval/blob/main/human_eval/main.py#L30
def filter_code(resp: str, model_name) -> str:
    """
    Extract the code portion from a raw model completion.

    Args:
        resp: the model's raw output text.
        model_name: used to select a model-specific cleanup strategy.

    Returns:
        The cleaned code string.
    """
    if "chatglm" in model_name:
        # Keep everything after the last docstring delimiter, drop backticks.
        # BUG FIX: the original branch computed this value but never returned
        # it, so chatglm responses came back as None.
        return resp.split('"""\n')[-1].replace("`", "")
    else:
        resp = resp.lstrip("\n")        # strip leading newlines
        return resp.split("\n\n")[0]    # keep only the first double-newline-delimited chunk

def fix_indent(text: str, indent: int = 4) -> str:
    """Replace every tab character in `text` with `indent` spaces."""
    spaces = " " * indent
    return spaces.join(text.split("\t"))
