from abc import ABC
import os
import json
from functools import partial

class DataWrapper(ABC):
    """Abstract base for map-style datasets.

    Subclasses must implement ``__len__`` and ``__getitem__`` so instances
    support ``len(ds)`` and ``ds[i]`` (e.g. for use with data loaders).
    """

    def __init__(self) -> None:
        super().__init__()

    def __len__(self) -> int:
        """Return the number of samples; must be overridden."""
        raise NotImplementedError

    def __getitem__(self, index):
        """Return the sample at ``index``; must be overridden.

        Note: the original signature omitted ``index``, so any subscript
        access raised TypeError instead of NotImplementedError.
        """
        raise NotImplementedError

class CMeIEData(DataWrapper):
    """Dataset wrapper for the CMeIE medical relation-extraction corpus.

    Loads the schema file (entity/relation type vocabularies) plus one data
    split, and serves samples whose entity/relation labels are mapped to
    integer ids and whose subject/object mentions are mapped to inclusive
    character spans within the sample text.
    """

    # Public split name -> on-disk jsonl file name.
    _SPLIT_FILES = {
        "train": "CMeIE_train.jsonl",
        "validation": "CMeIE_dev.jsonl",
        "test": "CMeIE_test.jsonl",
    }

    def __init__(self, datapath="CMeIE", datatype="train") -> None:
        """
        Args:
            datapath: directory containing the CMeIE jsonl files.
            datatype: one of "train", "validation" or "test".

        Raises:
            ValueError: if ``datatype`` is not a known split name.
        """
        self._schema_filename = os.path.join(datapath, "53_schemas.jsonl")

        try:
            self._data_filename = os.path.join(
                datapath, self._SPLIT_FILES[datatype]
            )
        except KeyError:
            # Previously an unknown split silently left _data_filename unset,
            # which surfaced later as a confusing AttributeError. Fail fast.
            raise ValueError(
                f"datatype must be one of {sorted(self._SPLIT_FILES)}, "
                f"got {datatype!r}"
            ) from None

        schema_mapping = self.load_schema_file(self._schema_filename)

        self.id2relation = schema_mapping["id2relation"]
        self.id2entity = schema_mapping["id2entity"]
        self.relation2id = schema_mapping["relation2id"]
        self.entity2id = schema_mapping["entity2id"]

        self.data = self.load_data_file(self._data_filename)

    def __len__(self):
        """Number of samples in the loaded split."""
        return len(self.data)

    def __getitem__(self, index):
        """Return sample ``index`` with labels converted to ids and spans.

        Returns:
            dict with keys:
                "text": the sample text (BOM characters stripped),
                "spo_list": list of dicts with integer "predicate",
                    "subject_type", "object_type" ids and inclusive
                    "subject_span" / "object_span" character spans.

        Raises:
            ValueError: if a subject/object mention does not occur in text.
        """
        sample = self.data[index]
        text = sample["text"].strip("\ufeff")  # drop stray BOM characters

        # text is the same for every span lookup in this sample, so bind it
        # once with partial instead of passing it on every call.
        get_span = partial(self.get_substr_span, text=text)

        cooked_sample = {
            "text": text,
            "spo_list": [],
        }

        for rel in sample["spo_list"]:
            subject_str = rel["subject"].strip("\ufeff")
            object_str = rel["object"]["@value"].strip("\ufeff")

            cooked_sample["spo_list"].append({
                "predicate": self.relation2id[rel["predicate"]],
                "subject_span": get_span(subject_str),
                "subject_type": self.entity2id[rel["subject_type"]],
                "object_span": get_span(object_str),
                "object_type": self.entity2id[rel["object_type"]["@value"]],
            })

        return cooked_sample

    @staticmethod
    def get_substr_span(substr: str, text: str) -> list:
        """Return the [start, end] (inclusive) span of ``substr`` in ``text``.

        Uses the first occurrence; raises ValueError when ``substr`` is not
        found (propagated from ``str.index``).
        """
        span_start = text.index(substr)
        return [span_start, span_start + len(substr) - 1]

    @staticmethod
    def load_schema_file(filename) -> dict:
        """Parse the jsonl schema file into id<->name mappings.

        Each line holds one relation schema with "subject_type",
        "object_type" and "predicate" fields.

        Returns:
            dict with keys "id2entity", "id2relation", "entity2id",
            "relation2id"; the id-keyed maps use int keys.
        """
        entity_types = set()
        relation_types = set()
        with open(filename, "r", encoding="utf-8") as fp:
            for line in fp:
                curr_rel = json.loads(line)
                entity_types.add(curr_rel["subject_type"])
                entity_types.add(curr_rel["object_type"])
                relation_types.add(curr_rel["predicate"])

        # Sort by unicode code point so ids are stable across runs.
        sorted_entities = sorted(entity_types)
        sorted_relations = sorted(relation_types)

        return {
            "id2entity": dict(enumerate(sorted_entities)),
            "id2relation": dict(enumerate(sorted_relations)),
            "entity2id": {name: i for i, name in enumerate(sorted_entities)},
            "relation2id": {name: i for i, name in enumerate(sorted_relations)},
        }

    @staticmethod
    def load_data_file(filename) -> list:
        """Read one sample dict per non-empty line of a jsonl file.

        Blank or whitespace-only lines (previously a JSONDecodeError) are
        skipped; BOM characters around each line are stripped before parsing.
        """
        samples = []
        with open(filename, "r", encoding="utf-8") as fp:
            for line in fp:
                json_text = line.strip(" \n\t\ufeff")
                if not json_text:
                    continue  # tolerate trailing/blank lines
                samples.append(json.loads(json_text))

        return samples