# This module supports output-only problems in OI.
from soj.builtin.environ.workspace import WorkSpace
from soj.common.judge import JudgeResultCode
from soj.builtin.compare.config import get_compare_engine_by_dict
from soj.builtin.compare.common import ResultCode
# There is no need to compile and execute here; we just have to compare two files.
from soj.builtin.scoring.subtask import ScoringEngine
from soj.builtin.scoring.config import resolve_scoring_config

from dataclasses import dataclass
from soj.common.util import ensure_file
from typing import TypedDict,Dict
from pathlib import Path
@dataclass
class TestcaseConfig:
    """Configuration for a single output-only testcase.

    Populated from the problem's ``meta`` dict: the expected-answer file
    path, the testcase id, the score awarded on acceptance, and the
    compare-engine configuration dict.
    """

    answer: Path   # path to the expected-answer file
    id: int        # testcase identifier
    score: int     # points awarded when the comparison accepts
    compare: Dict  # compare-engine config, e.g. {"type": "fulltext"}

    def check(self) -> None:
        """Validate that the answer file exists (delegates to ensure_file)."""
        ensure_file(self.answer)

class TextResultPerCase(TypedDict):
    """Judging result for a single testcase.

    Keys:
        score: points awarded for this testcase (0 when rejected).
        status: the judge verdict for this testcase.

    NOTE: TypedDict fields cannot carry default values (PEP 589) — the
    previous ``= 0`` / ``= JudgeResultCode.SYSTEM_ERROR`` right-hand sides
    were never applied to instances (TypedDicts are plain dicts) and are
    rejected by newer typing implementations, so they have been removed.
    Callers must populate both keys explicitly, as _judge_per_case does.
    """

    score: int
    status: JudgeResultCode

# like OIProblem in soj/judgetask/oi/judge.py
class TextProblem:
    """Judge for output-only ("text") problems.

    Mirrors OIProblem (soj/judgetask/oi/judge.py), but there is nothing to
    compile or execute: the participant submits one output per testcase and
    we only compare it against the stored answer file.

    Lifecycle: construct once per problem, then per participant call
    init_partcipant() -> judge() -> to_json() -> release_partcipant().
    NOTE(review): "partcipant" is a long-standing typo, but the method
    names are kept unchanged for backward compatibility with callers.
    """

    def __init__(self, base_dir: Path, meta: dict, scoring: dict):
        """Build per-testcase configs and resolve the scoring config.

        ``meta`` has the same structure as for OIProblem, minus input
        files. There is no problem-wide full score: each testcase carries
        its own ``score`` (default 100).

        Raises whatever TestcaseConfig.check() raises when an answer file
        is missing.
        """
        self.testcase: Dict[int, TestcaseConfig] = {}
        for entry in meta.get("testcase", []):
            cfg = TestcaseConfig(
                answer=base_dir / entry["answer"],
                id=entry["id"],
                score=entry.get("score", 100),
                compare=entry.get("compare", {"type": "fulltext"}),
            )
            cfg.check()  # fail fast if the answer file does not exist
            self.testcase[cfg.id] = cfg
        self.scoring_config = resolve_scoring_config(scoring)
        # Per-participant state, populated by init_partcipant().
        self.scoring_engine: ScoringEngine | None = None
        self.workspace: WorkSpace | None = None
        self.outputs: Dict[int, str] = {}

    def release_partcipant(self):
        """Release per-participant state; idempotent.

        Uses getattr so this never raises AttributeError when called from
        __del__ after __init__ aborted before the attributes were assigned
        (the previous implementation could raise in that case).
        """
        if getattr(self, "workspace", None) is not None:
            del self.workspace
        self.workspace = None
        if getattr(self, "scoring_engine", None) is not None:
            del self.scoring_engine
        self.scoring_engine = None

    def init_partcipant(self, outputs: Dict[int, str]):
        """Prepare to judge one participant.

        The participant has no executable; they submit outputs for judging.

        outputs: mapping from testcase id to the submitted output
        *content* (not a path — _judge_per_case writes the value verbatim
        into a file before comparing).
        """
        self.outputs = outputs
        # No output/report/error files are needed for an output-only judge.
        self.workspace = WorkSpace(create_files=False)
        self.scoring_engine = ScoringEngine(self.scoring_config)

    def _judge_per_case(self, case_id: int) -> TextResultPerCase:
        """Compare one submitted output against its answer file.

        A missing submission is judged as an empty file.

        Raises:
            RuntimeError: init_partcipant() was not called first.
            KeyError: unknown testcase id.
        """
        if self.workspace is None or self.scoring_engine is None:
            raise RuntimeError("Participant not initialized")
        if case_id not in self.testcase:
            raise KeyError("Testcase not found")
        # The submission is raw content, so materialize it as a file for
        # the compare engine; an absent submission becomes an empty file.
        user_output = self.workspace.create_file(filename="output.txt")
        with open(user_output, "w", encoding="utf-8") as f:
            f.write(self.outputs.get(case_id, ""))
        case = self.testcase[case_id]
        # Import the expected-answer file into the workspace, then compare.
        self.workspace.import_file(self.workspace.FileType.ANSWER, case.answer)
        compare_engine = get_compare_engine_by_dict(
            case.compare, user_output, self.workspace.answer_file
        )
        result = compare_engine.compare()
        # Map the compare verdict to (score, status), as in the OI judge.
        final_result: TextResultPerCase = {}
        if result["result_code"] == ResultCode.ACCEPTABLE:
            final_result["score"] = case.score
            final_result["status"] = JudgeResultCode.ACCEPTABLE
        elif result["result_code"] == ResultCode.UNACCEPTABLE:
            final_result["score"] = 0
            final_result["status"] = JudgeResultCode.WRONG_ANSWER
        else:
            # Any other compare result (engine failure) is a system error.
            final_result["score"] = 0
            final_result["status"] = JudgeResultCode.SYSTEM_ERROR
        return final_result

    def judge(self):
        """Judge every testcase and feed results to the scoring engine.

        Same contract as OIProblem.judge: submits each testcase's result,
        then returns ScoringEngine.get_result(). Time/memory are reported
        as 0 because nothing is executed for output-only problems.
        """
        if self.scoring_engine is None:
            raise RuntimeError("Participant not initialized")
        for case_id in self.testcase:
            result = self._judge_per_case(case_id)
            self.scoring_engine.set_testcase_result(
                id=case_id,
                result=result["score"],
                time=0,  # output-only: nothing runs
                mem=0,
                status=result["status"],
            )
        return self.scoring_engine.get_result()

    def to_json(self):
        """Serialize the scoring result; call judge() first."""
        if self.scoring_engine is None:
            raise RuntimeError("Participant not initialized")
        return self.scoring_engine.to_json()

    def __del__(self):
        # release_partcipant is getattr-guarded, so finalization cannot
        # raise AttributeError even if __init__ failed partway through.
        self.release_partcipant()