# Base task class; concrete subclasses are built on top of it.
'''
Tasks must be registered; a task composes the data, llm and parser instances
and their invocation into a pipeline.
1. The model (llm) implements batched calling internally, whether serial or parallel.
2. Batch assembly is managed by the data component.
3. The task runs each batch serially, caches intermediate results to a tmp file
   as it goes, and renames tmp to the final outfile once complete.
'''
import time
from dalchemy.data import DalchemyData, TextHelper
from dalchemy.llms import DalchemyLLM
from dalchemy.parsers import DalchemyParser
from typing import Optional
from dataclasses import dataclass, field
import shutil

txthelper = TextHelper()


@dataclass
class TaskArgs:
    """
    Arguments for TASK.
    """
    ######### pipe ########
    data_name: Optional[str] = field(
        default="",
        metadata={"help": ""}
    )
    llm_name: Optional[str] = field(
        default="",
        metadata={"help": ""}
    )

    parser_name: Optional[str] = field(
        default="",
        metadata={"help": ""}
    )
    ######### pipe ########

    out_file: Optional[str] = field(
        default="result.txt",
        metadata={"help": "output file."}
    )

    tmp_file: Optional[int] = field(
        default="step_result.txt",
        metadata={"help": "incremently save each step result."}
    )
    sample_sep: Optional[int] = field(
        default="\n\n",
        metadata={"help": "sample_sep."}
    )

    batch_size: Optional[int] = field(
        default=10,
        metadata={"help": "batch_size, prompt batch size."}
    )
    num_procs: Optional[int] = field(
        default=3,
        metadata={"help": "num process."}
    )

    sleep_sec: Optional[int] = field(
        default=3,
        metadata={"help": "sleep secs."}
    )

    start: Optional[int] = field(
        default=0,
        metadata={"help": "prompt start idx"}
    )
    end: Optional[int] = field(
        default=-1,
        metadata={"help": "prompt end idx"}
    )


class DalchemyTask(object):
    ''' 任务：包含任务配置、数据加载(组batch)、调用模型、进度-状态管理、解析、过滤等'''

    def __init__(self,
                 args: TaskArgs,
                 data: DalchemyData,
                 llm: DalchemyLLM,
                 parser: DalchemyParser):
        # TODO: 再传个dataclass类，方便管理参数
        self.args = args
        self.data = data
        self.llm = llm
        self.parser = parser

    @classmethod
    def setup_task(cls, args):  # 根据参数，自动构建data、llm、parser
        from dalchemy.data import build_data
        from dalchemy.llms import build_llm
        from dalchemy.parsers import build_parser
        data = build_data(args.data_name)
        # 利用具体的数据类，和输入参数构建data
        llm = build_llm(args.llm_name)
        parser = build_parser(args.parser_name)
        return cls(args, data, llm, parser)

    def run_task(self, ):
        # start和end是sample的开始和结束
        # 0.params
        # TODO： 参数不要硬编码，写数据类？
        args = self.args
        # batch_size = 1
        # num_procs = 1
        # sleep_sec = 1
        # sample_sep = "\n\n"
        # tmp_file = "step_result.txt"
        # res_file = "result.txt"

        # 1.data
        prompts = self.data.generate_prompts(parser=self.parser)
        batch_prompts = self.data.make_batches(prompts[args.start: args.end], batch_size=args.batch_size)

        # 2.call
        sample_idx = args.start
        # if start>0:
        #     sample_idx += start * batch_size
        results = []
        print(f"total batches: {len(batch_prompts)}")
        for batch_idx, batch_prompt in enumerate(batch_prompts):
            print(f"start idx: {sample_idx},batch idx:{batch_idx} batch len: {len(batch_prompt)}")
            step_results = self.llm.generate(batch_prompt, num_procs=args.num_procs)
            self.save_results(step_results, outfile=args.tmp_file, start=sample_idx, sep=args.sample_sep)
            results.extend(step_results)
            # TODO: 记录进度日志、方便恢复。 8/9 参考fairseq的log写日志
            time.sleep(args.sleep_sec)

            sample_idx += len(batch_prompt)

        # 5. write(rename tmp) final res （每步都存储结果为tmp，直到写完整在rename为outfile）
        # self.save_results(results, outfile=res_file, sep=sample_sep)
        shutil.copy(args.tmp_file, args.out_file)
        return results

    def state_dict(self):
        # 状态字典，用于恢复生成任务 (对prompt进行hash防止重复生成，并记录每个输入的错误信息)
        pass

    def save_results(self, results, outfile="step_result.txt", start=0, sep=None):
        ''' 保存解析的结果，可以重写支持不同类型的输出，如json、txt '''
        parsed_results = self.parser.parse_result(results)
        # 保存idx
        for total_id, batch_sample_id in enumerate(range(len(parsed_results)), start=start):
            for out_id in range(len(parsed_results[batch_sample_id])):
                parsed_results[batch_sample_id][out_id]["idx"] = total_id
        parsed_results = [txthelper.jsonify_list(result_ls) for result_ls in
                          parsed_results]  # List[str]，每个sample的多个结果\n分割

        txthelper.write_lines_append(res_ls=parsed_results, outfile=outfile, sep=sep)
