# Configuration resolver for OI problem
from soj.builtin.scoring.subtask import ScoringEngine
from soj.builtin.scoring.config import resolve_scoring_config
from pathlib import Path
from dataclasses import dataclass
from soj.common.util import ensure_file
from typing import TypedDict,Dict
from soj.builtin.compare.config import get_compare_engine_by_dict
from soj.builtin.compare.common import ResultCode
@dataclass
class TestcaseConfig:
    """Static description of one testcase: data files, limits, and scoring."""

    input: Path     # path to the testcase input file
    answer: Path    # path to the expected-answer file
    id: int         # testcase identifier, unique within the problem
    timelimit: int  # time limit for this case
    memlimit: int   # memory limit for this case
    score: int      # points awarded when this case is fully correct
    compare: Dict   # compare-engine configuration for this case

    def check(self):
        """Verify that both data files exist on disk."""
        for data_file in (self.input, self.answer):
            ensure_file(data_file)

from soj.builtin.environ.workspace import WorkSpace
from soj.common.judge import JudgeResultCode
from soj.builtin.autorun.orchestrator import AutorunOrchestrator
from soj.builtin.compile.orchestrator import CompileOrchestrator
from soj.common.lang import Language
from tempfile import NamedTemporaryFile
class OIResultPerCase(TypedDict):
    """Per-testcase judging outcome as returned by ``OIProblem._judge_per_case``.

    TypedDict fields cannot carry default values — the original
    ``score: int = 0`` style assignments had no runtime effect as defaults
    and are rejected by type checkers, so they were removed; callers
    populate every key explicitly.
    """

    score: int               # points earned on this case (0 unless accepted)
    time: int                # measured run time, as reported by the runner
    memory: int              # measured memory usage, as reported by the runner
    status: JudgeResultCode  # final status code for this case

class OIProblem:
    """Judging pipeline for a classic OI problem (per-testcase scoring).

    Typical lifecycle per submission:
        ``init_partcipant()`` -> ``judge()`` -> ``to_json()`` ->
        ``release_partcipant()``.

    NOTE(review): "partcipant" is a typo for "participant" in the public
    method names; kept as-is for backward compatibility with callers.
    """

    def __init__(self, base_dir: Path, meta: dict, scoring: dict):
        """Load testcase metadata and resolve the scoring configuration.

        :param base_dir: directory the testcase file paths in ``meta`` are
            relative to
        :param meta: problem metadata; ``meta["testcase"]`` is a list of
            per-case dicts (``id``, ``input`` and ``answer`` are required)
        :param scoring: raw scoring config, passed to
            ``resolve_scoring_config``
        """
        self.testcase: Dict[int, TestcaseConfig] = {}
        for i in meta.get("testcase", []):
            self.testcase[i["id"]] = TestcaseConfig(
                input=base_dir / i["input"],
                answer=base_dir / i["answer"],
                id=i["id"],
                timelimit=i.get("time", 1000),   # default 1000 — presumably ms, confirm against runner
                memlimit=i.get("memory", 256),   # default 256 — presumably MiB, confirm against runner
                score=i.get("score", 100),
                compare=i.get("compare", {"type": "fulltext"}),
            )
            # fail fast if a data file is missing
            self.testcase[i["id"]].check()
        self.scoring_config = resolve_scoring_config(scoring)
        # Per-participant state, populated by init_partcipant().
        self.scoring_engine: ScoringEngine | None = None
        self.workspace: WorkSpace | None = None
        self.executable: Path | None = None
        self.autorun: AutorunOrchestrator | None = None

    def release_partcipant(self):
        """Drop all per-participant state so the instance can be reused.

        Safe to call repeatedly, and on a partially-constructed instance:
        ``__del__`` calls this unconditionally, and ``__init__`` may have
        raised before these attributes were ever set.
        """
        # getattr guards fix an AttributeError the original raised from
        # __del__ when __init__ failed before these attributes existed.
        for attr in ("workspace", "scoring_engine", "executable", "autorun"):
            if getattr(self, attr, None) is not None:
                # Rebinding to None releases the last reference; the
                # original's `del` followed by reassignment was redundant.
                setattr(self, attr, None)

    def init_partcipant(self, source: str, lang: Language, settings: dict | None = None):
        """Compile *source* and prepare the execution/scoring state.

        :param source: submission source code
        :param lang: submission language
        :param settings: optional compile settings; also forwarded to the
            runner, since some of them matter at execution time
            (e.g. python O2)
        :raises RuntimeError: on compile failure, carrying the compiler
            message
        """
        import os  # local import: only needed here for temp-file cleanup

        # delete=False so the compiler can reopen the file by name.
        f = NamedTemporaryFile(mode="w+", delete=False)
        try:
            f.write(source)
            f.flush()
            cp = CompileOrchestrator(lang, Path(f.name), settings)
            result = cp.compile()
        finally:
            # The original leaked this handle when compile() raised.
            f.close()
        # The compile module signals failure through the 'ok' field rather
        # than raising; translate that into an exception here.
        if not result['ok']:
            os.unlink(f.name)  # the temp source is no longer needed
            raise RuntimeError("Compile error: " + result['msg'])
        # NOTE(review): on success the temp source file is never removed;
        # left untouched because result['artifact'] may alias it for
        # interpreted languages — confirm and unlink here if that is safe.
        self.workspace = result['output']
        self.executable = result['artifact']
        self.autorun = AutorunOrchestrator(self.executable, lang, settings)
        self.scoring_engine = ScoringEngine(self.scoring_config)
        # There is no need to pass the full score to ScoringEngine; it
        # judges status by its own policy (not customizable currently).
        # The compare engine cannot be chosen yet — it is per-testcase.

    def _judge_per_case(self, case_id: int) -> OIResultPerCase:
        """Run and compare a single testcase.

        The result is returned but NOT submitted to the scoring engine —
        ``judge`` does that.

        :raises RuntimeError: if the participant is not initialized or the
            testcase id is unknown
        """
        if self.autorun is None or self.workspace is None or self.scoring_engine is None:
            raise RuntimeError("Participant not initialized")
        if case_id not in self.testcase:
            raise RuntimeError("Testcase ID not found")
        case = self.testcase[case_id]
        # Stage this case's input/answer files into a fresh workspace.
        self.workspace.reprepare_file()
        self.workspace.import_file(self.workspace.FileType.INPUT, case.input)
        self.workspace.import_file(self.workspace.FileType.ANSWER, case.answer)
        exec_result = self.autorun.run(
            time=case.timelimit,
            mem=case.memlimit,
            input=self.workspace.input_file,
            output=self.workspace.output_file,
            error=self.workspace.error_file,
            report=self.workspace.report_file
        )
        # Runtime failure (anything but ACCEPTABLE): no comparison needed.
        if exec_result['status'] != JudgeResultCode.ACCEPTABLE:
            return OIResultPerCase(
                score=0,
                time=exec_result['time'],
                memory=exec_result['memory'],
                status=exec_result['status']
            )
        # A compare engine is built per case since each case may configure
        # its own comparison; CompareOrchestrator is deliberately bypassed.
        ce = get_compare_engine_by_dict(case.compare,
            self.workspace.output_file,
            self.workspace.answer_file
        )
        cmp_result = ce.compare()
        result: OIResultPerCase = {}
        if cmp_result["result_code"] == ResultCode.FAILED:
            # The comparator itself broke — not the submission's fault.
            result['score'] = 0
            result['status'] = JudgeResultCode.SYSTEM_ERROR
        elif cmp_result['result_code'] != ResultCode.ACCEPTABLE:
            result['score'] = 0
            result['status'] = JudgeResultCode.WRONG_ANSWER
        else:
            result['score'] = case.score
            result['status'] = JudgeResultCode.ACCEPTABLE
        result['time'] = exec_result['time']
        result['memory'] = exec_result['memory']
        return result

    def judge(self):
        """Judge all testcases, submit each to the scoring engine, and
        return ``ScoringEngine.get_result()``.

        :raises RuntimeError: if the participant is not initialized
        """
        if self.scoring_engine is None:
            raise RuntimeError("Participant not initialized")
        for case_id in self.testcase:
            res = self._judge_per_case(case_id)
            self.scoring_engine.set_testcase_result(
                id=case_id,
                result=res['score'],
                time=res['time'],
                mem=res['memory'],
                status=res['status']
            )
        return self.scoring_engine.get_result()

    def to_json(self, paintable=True):
        """Serialize the scoring result; call ``judge`` first.

        :raises RuntimeError: if the participant is not initialized
        """
        if self.scoring_engine is None:
            raise RuntimeError("Participant not initialized")
        return self.scoring_engine.to_json(paintable=paintable)

    def __del__(self):
        # Best-effort cleanup of per-participant resources.
        self.release_partcipant()

# With this, the judge supports the most common OI problem configurations.