# Task base class; concrete tasks are built by subclassing it.
'''
A task must be registered; it composes the instantiation and invocation of
data, llm and parser into a pipeline.
1. The model implements batched calling internally, whether serial or parallel.
2. Batching is managed by the data component.
3. The task runs the batches serially, caches intermediate results to a tmp
   file as it goes, and renames the tmp file to the final outfile when done.
'''
import shutil
import time
from dataclasses import dataclass, field
from typing import Optional

from dalchemy.data import DalchemyData, TextHelper
from dalchemy.llms import DalchemyLLM
from dalchemy.parsers import DalchemyParser
txthelper = TextHelper()


@dataclass
class TaskArgs:
    """
    Arguments for a TASK run.

    Every field has a default, so ``TaskArgs()`` is a valid, complete
    configuration; pass overrides at construction time.
    """
    # Final output path; the finished tmp file is renamed to this.
    out_file: Optional[str] = field(
        default="result.txt",
        metadata={"help": "output file."}
    )

    # Incremental checkpoint path, appended to after every batch.
    # Fixed: was annotated Optional[int] although the default (and usage)
    # is a string path.
    tmp_file: Optional[str] = field(
        default="step_result.txt",
        metadata={"help": "incrementally save each step result."}
    )
    # Number of prompts sent to the LLM per batch.
    batch_size: Optional[int] = field(
        default=10,
        metadata={"help": "batch_size, prompt batch size."}
    )
    # Parallelism forwarded to the LLM's batched generate call.
    num_procs: Optional[int] = field(
        default=3,
        metadata={"help": "num process."}
    )

    # Pause between batches (crude rate limiting).
    sleep_sec: Optional[int] = field(
        default=3,
        metadata={"help": "sleep secs."}
    )

    # Sample range to process; end == -1 means "through the last prompt".
    start: Optional[int] = field(
        default=0,
        metadata={"help": "prompt start idx"}
    )
    end: Optional[int] = field(
        default=-1,
        metadata={"help": "prompt end idx"}
    )


class DalchemyTask(object):
    """A task pipeline: configuration, data loading (batching), model calls,
    progress/state management, result parsing and filtering.

    TODO: 1. accept a TaskArgs dataclass instead of loose kwargs;
          2. call llm.calc_price to track the total generation cost.
    """

    def __init__(self, data: "DalchemyData",
                 llm: "DalchemyLLM",
                 parser: "DalchemyParser"):
        """Wire together the three pipeline components.

        Args:
            data: prompt source; must provide generate_prompts / make_batches.
            llm: model wrapper; must provide a batched generate().
            parser: result parser; must provide parse_result().
        """
        # TODO: also accept a TaskArgs instance for easier configuration.
        self.data = data
        self.llm = llm
        self.parser = parser

    @classmethod
    def setup_task(cls, args):
        """Alternate constructor: build data/llm/parser from ``args``.

        NOTE(review): still a stub — subclasses are expected to construct the
        real components from ``args``. The placeholders below only keep the
        return type consistent.
        """
        data = ''
        # Identity stand-in for a real DalchemyLLM.
        llm = lambda x: x
        parser = ""
        return cls(data, llm, parser)

    def run_task(self, batch_size=1, num_procs=1, sleep_sec=1, start=0,
                 end=-1, res_file="result.txt", tmp_file="step_result.txt"):
        """Run generation over prompts[start:end], batch by batch.

        Each batch's parsed results are appended to ``tmp_file`` immediately,
        so a crash loses at most one batch; once every batch has finished,
        ``tmp_file`` is renamed to ``res_file``.

        Args:
            batch_size: number of prompts per model call.
            num_procs: parallelism passed through to ``llm.generate``.
            sleep_sec: seconds to sleep between batches (rate limiting).
            start: index of the first sample to process.
            end: index past the last sample; -1 means "through the end".
            res_file: final output path.
            tmp_file: incremental checkpoint path. NOTE: it is appended to,
                so remove a stale tmp file before starting a fresh run.

        Returns:
            The raw (unparsed) generations for the selected range.
        """
        sample_sep = "\n\n"

        # 1. data: build all prompts, then batch the requested slice.
        prompts = self.data.generate_prompts(parser=self.parser)
        if end == -1:
            end = len(prompts)
        batch_prompts = self.data.make_batches(prompts[start:end], batch_size=batch_size)

        # 2. call the model batch by batch, checkpointing after each one.
        sample_idx = start
        results = []
        print(f"total batches: {len(batch_prompts)}")
        for batch_idx, batch_prompt in enumerate(batch_prompts):
            lens = [len(p) for p in batch_prompt]
            print("prompts len:", lens)
            print("first prompt", batch_prompt[0])
            print(f"start idx: {sample_idx},batch idx:{batch_idx} batch len: {len(batch_prompt)}")
            step_results = self.llm.generate(batch_prompt, num_procs=num_procs)
            print("step_results", step_results)
            self.save_results(step_results, outfile=tmp_file, start=sample_idx, sep=sample_sep)
            results.extend(step_results)
            # TODO: persist progress so an interrupted run can resume.
            time.sleep(sleep_sec)

            sample_idx += len(batch_prompt)

        # 3. publish: rename the checkpoint to the final outfile, as the
        # class contract documents. (Was shutil.copy, which left a stale
        # tmp checkpoint behind that later runs would append to.)
        shutil.move(tmp_file, res_file)
        return results

    def state_dict(self):
        """State dict for resuming a generation task (hash each prompt to
        avoid duplicate generation; record per-input error info).

        Not implemented yet.
        """
        pass

    def save_results(self, results, outfile="step_result.txt", start=0, sep=None):
        """Parse raw generations and append them to ``outfile``.

        Override this to support other output formats (json, txt, ...).

        Args:
            results: raw model outputs for one batch.
            outfile: file to append to.
            start: global index of the first sample in this batch; every
                parsed output dict gets an ``idx`` field so rows can be
                traced back to their source prompt.
            sep: separator written between samples.
        """
        parsed_results = self.parser.parse_result(results)
        # Tag every output of sample i with its global index start + i.
        for offset, sample_outputs in enumerate(parsed_results):
            for out in sample_outputs:
                out["idx"] = start + offset
        # One JSON-lines string per sample (a sample's multiple outputs are
        # joined with newlines by jsonify_list).
        parsed_results = [txthelper.jsonify_list(result_ls) for result_ls in parsed_results]

        txthelper.write_lines_append(res_ls=parsed_results, outfile=outfile, sep=sep)
