# Dataset readers that build model-ready instances using BERT-family tokenizers.

import itertools
from typing import Dict, List, Tuple
import json

from overrides import overrides
import logging
import torch.tensor
import numpy as np
import random
import copy

from neural_modules.bert_servant import BertServant,RobertaServant,DebertaServant
from wn_featurizer.additional_feature import encode_num_in_ltokens
from allennlp.data.fields import MetadataField

from allennlp.data.dataset_readers.dataset_reader import DatasetReader

from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer, ELMoTokenCharactersIndexer
from allennlp.data.fields import Field, TextField, LabelField, ArrayField,ListField
from allennlp.data.instance import Instance
from allennlp.data import Token

from data_util.customized_field import IdField, BertIndexField,GraphIndexField
from data_util.exvocab import ExVocabulary, read_normal_embedding_file, load_vocab_embeddings, build_vocab_embeddings

# from pathlib import Path
from pathlib import Path
import config
#from sample_for_nli.tf_idf_sample_v1_0 import select_sent_for_eval, sample_v1_0


from wn_featurizer import wn_persistent_api

from data_util.paragraph_span import ParagraphSpan
from utils.print_log import get_info_log
logger=get_info_log(__name__)

MAX_EVIDENCE_SENT_NUM = 5   # We might change this for ground truth evidence

from utils.text_clean import STOPWORDS
class BertReader(DatasetReader):
    """
    WordNet augmented Data Reader.

    Converts upstream FEVER-style samples (a claim plus scored evidence
    sentences) into instances carrying one BERT paired sequence:
    ``[CLS] <evidence tokens> [SEP] <claim tokens> [SEP]``
    together with token type ids, per-token evidence relatedness
    probabilities, and (exclusive-end) span positions of both segments.
    """

    def __init__(self,
                 bert_servant: BertServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60, shuffle_sentences=False) -> None:

        # max_l indicate the max length of each individual sentence.
        # the final concatenation of sentences is 60 * 6 = 5(evid) * 60 + 1(claim) * 60

        super().__init__(lazy=lazy)
        self._example_filter = example_filter  # callable(example) -> True means "drop this example"
        self.max_l = max_l  # per-sentence token cap (applied to each evidence sentence and to the claim)
        self.shuffle_sentences = shuffle_sentences  # shuffle evidence sentence order (data augmentation)
        self.bert_servant: BertServant = bert_servant

    @overrides
    def _read(self, data_list):
        # Note: reads from an in-memory list of examples, not a file path.
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # Skip the example when a filter is installed and matches it.
            if self._example_filter is None:
                pass
            elif self._example_filter(example):
                continue

            # We use binary parse here
            # first element is the sentence and the second is the upstream semantic relatedness score.
            premise: List[Tuple[str, float]] = example["evid"]
            # truncate premise
            premise = premise[:MAX_EVIDENCE_SENT_NUM]

            hypothesis = example["claim"]

            # Placeholder sentence so the evidence side of the pair is never empty.
            if len(premise) == 0:
                premise = [("EEMMPPTTYY", 0.0)]

            pid = str(example['id'])

            yield self.text_to_instance(premise, hypothesis, pid, label)

    @overrides
    def text_to_instance(self,  # type: ignore
                         premise: List[Tuple[str, float]],  # Important type information
                         hypothesis: str,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Build the paired [CLS]/[SEP] sequence plus probability and span fields.

        premise: list of (evidence sentence, upstream relatedness score) pairs.
        hypothesis: the claim text.
        """

        fields: Dict[str, Field] = {}

        if self.shuffle_sentences:
            # Potential improvement. Shuffle the input sentences. Maybe close this at last several epoch.
            # NOTE(review): shuffles the caller's list in place — harmless from
            # _read (which passes a fresh slice) but visible to other callers.
            random.shuffle(premise)

        premise_prob_list = []
        premise_tokens_list = []

        for premise_sent, prob in premise:
            tokenized_cur_sent = self.bert_servant.tokenize(premise_sent, modify_from_corenlp=True)
            # cur_sent_ids = self.bert_servant.tokens_to_ids(tokenized_cur_sent)

            if self.max_l is not None:
                tokenized_cur_sent = tokenized_cur_sent[:self.max_l]    # truncate max length (default 60)

            premise_tokens_list.extend(tokenized_cur_sent)
            # Every token of this sentence shares the sentence-level relatedness score.
            prob_value = np.ones((len(tokenized_cur_sent), 1), dtype=np.float32) * prob
            premise_prob_list.append(prob_value)

        premise_prob = np.concatenate(premise_prob_list, axis=0)
        # premise_tokens_id_list = self.bert_servant.tokens_to_ids(premise_tokens_list)

        hypothesis_tokens_list = self.bert_servant.tokenize(hypothesis, modify_from_corenlp=True)

        # print("WTF!!!, p", len(premise_tokens_list))
        # print("WTF!!!, h", len(hypothesis_tokens_list))

        if self.max_l is not None:
            hypothesis_tokens_list = hypothesis_tokens_list[:self.max_l]

        # Claim tokens all carry probability 1.0.
        hypothesis_prob = np.ones((len(hypothesis_tokens_list), 1), dtype=np.float32)

        assert len(premise_tokens_list) == len(premise_prob)
        assert len(hypothesis_tokens_list) == len(hypothesis_prob)

        # BERT pair layout: [CLS] premise [SEP] hypothesis [SEP]
        paired_tokens_sequence = ['[CLS]'] + premise_tokens_list + ['[SEP]'] + hypothesis_tokens_list + ['[SEP]']
        # Segment ids: 0 covers [CLS] + premise + middle [SEP]; 1 covers hypothesis + final [SEP].
        token_type_ids = [0] * (2 + len(premise_tokens_list)) + [1] * (1 + len(hypothesis_tokens_list))

        paired_ids_seq = self.bert_servant.tokens_to_ids(paired_tokens_sequence)
        assert len(paired_ids_seq) == len(token_type_ids)
        fields['paired_sequence'] = BertIndexField(np.asarray(paired_ids_seq, dtype=np.int64))
        fields['paired_token_type_ids'] = BertIndexField(np.asarray(token_type_ids, dtype=np.int64))

        # Token index ranges of each segment inside the paired sequence
        # (premise starts right after [CLS]; hypothesis starts after the middle [SEP]).
        premise_span = (1, 1 + len(premise_tokens_list)) # End is exclusive (important for later use)
        hypothesis_span = (premise_span[1] + 1, premise_span[1] + 1 + len(hypothesis_tokens_list))

        # Sanity check: the two spans plus the three special tokens cover the whole sequence.
        assert len(paired_ids_seq) == 1 + (premise_span[1] - premise_span[0]) + 1 + \
               (hypothesis_span[1] - hypothesis_span[0]) + 1

        fields['bert_premise_span'] = MetadataField(premise_span)
        fields['bert_hypothesis_span'] = MetadataField(hypothesis_span)

        fields['premise_probs'] = MetadataField(premise_prob)
        fields['hypothesis_probs'] = MetadataField(hypothesis_prob)

        if label:
            fields['label'] = LabelField(label, label_namespace='labels')

        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)

class RobertaReader(DatasetReader):
    """
    Sentence-selection reader for RoBERTa.

    Builds one RoBERTa paired sequence per example:
    ``<s> <query tokens> </s> </s> <text tokens> </s>``
    right-padded with ``<pad>`` to ``max_l``, plus the matching attention mask.
    """

    def __init__(self,
                 bert_servant: RobertaServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60, shuffle_sentences=False) -> None:
        """
        bert_servant: tokenizer/vocab wrapper for RoBERTa.
        example_filter: callable(example) -> True to skip that example.
        max_l: fixed total length of the paired sequence (tokens + specials + padding).
        """
        super().__init__(lazy=lazy)
        self._example_filter = example_filter
        self.max_l = max_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant: RobertaServant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example (in-memory list, not a file path)."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["selection_label"]

            # Skip examples matched by the configured filter.
            if self._example_filter is not None and self._example_filter(example):
                continue

            premise = example['text']
            hypothesis = example["query"]

            # Placeholder so the premise segment is never empty.
            if premise == "":
                premise = "EEMMPPTTYY"

            pid = str(example['selection_id'])

            yield self.text_to_instance(premise, hypothesis, pid, label)

    @overrides
    def text_to_instance(self,  # type: ignore
                         premise: str,
                         hypothesis: str,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Encode ``<s> hypothesis </s> </s> premise </s>``, padded to max_l."""

        fields: Dict[str, Field] = {}

        premise_tokens_list = self.bert_servant.tokenize(premise, modify_from_corenlp=True)
        hypothesis_tokens_list = self.bert_servant.tokenize(hypothesis, modify_from_corenlp=True)

        if self.max_l is not None:
            # 4 special tokens: <s>, the two middle </s>, and the final </s>.
            budget = self.max_l - 4
            if len(premise_tokens_list) + len(hypothesis_tokens_list) > budget:
                # BUGFIX: the old code sliced the premise with ``[:max_l - diff]``,
                # which does not bound the total length in general (a long premise
                # plus a long hypothesis could still overflow max_l, and the
                # padding step then produced an over-long, unpadded sequence).
                # Truncate the premise first; only cut the hypothesis when it
                # alone exceeds the budget.
                keep_p = max(budget - len(hypothesis_tokens_list), 0)
                premise_tokens_list = premise_tokens_list[:keep_p]
                hypothesis_tokens_list = hypothesis_tokens_list[:budget - keep_p]

        # BUGFIX: the sequence previously ended with ``<s>``; the RoBERTa pair
        # format ends with ``</s>`` (<s> A </s> </s> B </s>).
        paired_tokens = ["<s>"] + hypothesis_tokens_list + ["</s>", "</s>"] + premise_tokens_list + ["</s>"]
        paired_input_mask = [1] * len(paired_tokens)
        if self.max_l is not None:
            # Right-pad to the fixed length; padding positions are masked out.
            pad_n = self.max_l - len(paired_tokens)
            if pad_n > 0:
                paired_tokens.extend(["<pad>"] * pad_n)
                paired_input_mask.extend([0] * pad_n)
        paired_token_ids = self.bert_servant.tokens_to_ids(paired_tokens)

        fields['paired_sequence'] = BertIndexField(np.asarray(paired_token_ids, dtype=np.int64))
        fields['paired_input_mask'] = BertIndexField(np.asarray(paired_input_mask, dtype=np.int64))
        if label:
            fields['selection_label'] = LabelField(label, label_namespace='selection_labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)


class AlbertReader(DatasetReader):
    """
    Sentence-selection reader for ALBERT/BERT-style models.

    Builds ``[CLS] <query tokens> [SEP] <text tokens> [SEP]`` right-padded
    with ``[PAD]`` to ``max_l``, plus the attention mask and segment ids.
    """

    def __init__(self,
                 bert_servant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60, shuffle_sentences=False) -> None:
        """
        bert_servant: tokenizer/vocab wrapper used to tokenize and index text.
        example_filter: callable(example) -> True to skip that example.
        max_l: fixed total length of the paired sequence (tokens + specials + padding).
        """
        super().__init__(lazy=lazy)
        self._example_filter = example_filter
        self.max_l = max_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example (in-memory list, not a file path)."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["selection_label"]

            # Skip examples matched by the configured filter.
            if self._example_filter is not None and self._example_filter(example):
                continue

            premise = example['text']
            hypothesis = example["query"]

            # Placeholder so the premise segment is never empty.
            if premise == "":
                premise = "EEMMPPTTYY"

            pid = str(example['selection_id'])

            yield self.text_to_instance(premise, hypothesis, pid, label)

    @overrides
    def text_to_instance(self,  # type: ignore
                         premise: str,
                         hypothesis: str,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Encode ``[CLS] hypothesis [SEP] premise [SEP]``, padded to max_l."""

        fields: Dict[str, Field] = {}

        premise_tokens_list = self.bert_servant.tokenize(premise, modify_from_corenlp=True)
        hypothesis_tokens_list = self.bert_servant.tokenize(hypothesis, modify_from_corenlp=True)

        if self.max_l is not None:
            # 3 special tokens: [CLS] and the two [SEP]s.
            budget = self.max_l - 3
            if len(premise_tokens_list) + len(hypothesis_tokens_list) > budget:
                # BUGFIX: the old code sliced the premise with ``[:max_l - diff]``,
                # which does not bound the total length in general (a long premise
                # plus a long hypothesis could still overflow max_l, leaving an
                # over-long, unpadded sequence). Truncate the premise first; only
                # cut the hypothesis when it alone exceeds the budget.
                keep_p = max(budget - len(hypothesis_tokens_list), 0)
                premise_tokens_list = premise_tokens_list[:keep_p]
                hypothesis_tokens_list = hypothesis_tokens_list[:budget - keep_p]

        paired_tokens = ["[CLS]"] + hypothesis_tokens_list + ["[SEP]"] + premise_tokens_list + ["[SEP]"]
        paired_input_mask = [1] * len(paired_tokens)
        # Segment ids: 0 covers [CLS] + hypothesis + middle [SEP]; 1 covers the rest.
        paired_token_type_ids = [0] * (2 + len(hypothesis_tokens_list)) + [1] * (1 + len(premise_tokens_list))
        if self.max_l is not None:
            # Right-pad to the fixed length; padding positions are masked out.
            while len(paired_tokens) < self.max_l:
                paired_tokens.append("[PAD]")
                paired_input_mask.append(0)
                paired_token_type_ids.append(1)
        paired_token_ids = self.bert_servant.tokens_to_ids(paired_tokens)

        fields['paired_sequence'] = BertIndexField(np.asarray(paired_token_ids, dtype=np.int64))
        fields['paired_token_type_ids'] = BertIndexField(np.asarray(paired_token_type_ids, dtype=np.int64))
        fields['paired_input_mask'] = BertIndexField(np.asarray(paired_input_mask, dtype=np.int64))
        if label:
            fields['selection_label'] = LabelField(label, label_namespace='selection_labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)


class RobertaNLIReader(DatasetReader):
    """
    NLI (claim verification) reader for RoBERTa.

    Builds ``<s> <claim tokens> </s> </s> <evidence tokens> </s>`` right-padded
    with ``<pad>`` to ``max_l``, plus the matching attention mask.
    """

    def __init__(self,
                 bert_servant: RobertaServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60, shuffle_sentences=False) -> None:
        """
        bert_servant: tokenizer/vocab wrapper for RoBERTa.
        example_filter: callable(example) -> True to skip that example.
        max_l: fixed total length of the paired sequence (tokens + specials + padding).
        """
        super().__init__(lazy=lazy)
        self._example_filter = example_filter
        self.max_l = max_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant: RobertaServant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example (in-memory list, not a file path)."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # Skip examples matched by the configured filter.
            if self._example_filter is not None and self._example_filter(example):
                continue

            # NOTE(review): evidence sentences are joined with no separator, so
            # the last word of one sentence fuses with the first word of the
            # next — confirm this is intended before changing it.
            premise = "".join(example['evidence_text_list'])
            hypothesis = example["claim"]

            # Placeholder so the premise segment is never empty.
            if premise == "":
                premise = "EEMMPPTTYY"

            pid = str(example['id'])

            yield self.text_to_instance(premise, hypothesis, pid, label)

    @overrides
    def text_to_instance(self,  # type: ignore
                         premise: str,
                         hypothesis: str,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Encode ``<s> hypothesis </s> </s> premise </s>``, padded to max_l."""

        fields: Dict[str, Field] = {}

        premise_tokens_list = self.bert_servant.tokenize(premise, modify_from_corenlp=True)
        hypothesis_tokens_list = self.bert_servant.tokenize(hypothesis, modify_from_corenlp=True)

        if self.max_l is not None:
            # 4 special tokens: <s>, the two middle </s>, and the final </s>.
            budget = self.max_l - 4
            if len(premise_tokens_list) + len(hypothesis_tokens_list) > budget:
                # BUGFIX: the old code sliced the premise with ``[:max_l - diff]``,
                # which does not bound the total length in general. Truncate the
                # premise first; only cut the hypothesis when it alone exceeds
                # the budget.
                keep_p = max(budget - len(hypothesis_tokens_list), 0)
                premise_tokens_list = premise_tokens_list[:keep_p]
                hypothesis_tokens_list = hypothesis_tokens_list[:budget - keep_p]

        # BUGFIX: the sequence previously ended with ``<s>``; the RoBERTa pair
        # format ends with ``</s>`` (<s> A </s> </s> B </s>).
        paired_tokens = ["<s>"] + hypothesis_tokens_list + ["</s>", "</s>"] + premise_tokens_list + ["</s>"]
        paired_input_mask = [1] * len(paired_tokens)
        if self.max_l is not None:
            # Right-pad to the fixed length; padding positions are masked out.
            pad_n = self.max_l - len(paired_tokens)
            if pad_n > 0:
                paired_tokens.extend(["<pad>"] * pad_n)
                paired_input_mask.extend([0] * pad_n)
        paired_token_ids = self.bert_servant.tokens_to_ids(paired_tokens)

        fields['paired_sequence'] = BertIndexField(np.asarray(paired_token_ids, dtype=np.int64))
        fields['paired_input_mask'] = BertIndexField(np.asarray(paired_input_mask, dtype=np.int64))
        if label:
            fields['label'] = LabelField(label, label_namespace='labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)



class XlnetConcatReader(DatasetReader):
    """Reader that concatenates claim and evidence into one XLNet-style sequence.

    Layout: ``<claim tokens> <sep> <evidence tokens> <sep> <cls>``, right-padded
    with ``<pad>`` up to ``max_l``.
    """

    def __init__(self,
                 bert_servant: DebertaServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60, max_evi_node_l=40, shuffle_sentences=False) -> None:
        """max_l is the fixed total length of the concatenated token sequence."""
        super().__init__(lazy=lazy)
        self._example_filter = example_filter
        self.max_l = max_l
        self.max_evi_node_l = max_evi_node_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant: DebertaServant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example (in-memory list, not a file path)."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # Honor the optional example filter.
            if self._example_filter is not None and self._example_filter(example):
                continue

            evidence_list = example['evidence_text_list']
            evidence_tuple_list = example['evidence_tuple_list']
            claim = example["claim"]
            # SRL / claim tuples are currently disabled upstream.
            srl_tuple_list = []
            claim_tuple_list = []

            # Placeholder so the evidence segment is never empty.
            if not evidence_list:
                evidence_list = ["EEMMPPTTYY"]

            pid = str(example['id'])

            yield self.text_to_instance(evidence_list, evidence_tuple_list, claim,
                                        claim_tuple_list, srl_tuple_list, pid, label)

    def clean_text(self, text):
        """Replace PTB-style bracket escapes and tabs with plain characters."""
        # Order matters: the hyphenated forms must be rewritten before the bare ones.
        for src, dst in (('\t', ' '), ('-LRB-', '('), ('-RRB-', ')'),
                         ('LRB', '('), ('RRB', ')'),
                         ('-LSB-', '('), ('-RSB-', ')')):
            text = text.replace(src, dst)
        return text

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Pop tokens off the end of ``tokens_b`` until the pair fits max_length.

        Note: only the second sequence (the evidence) is shortened here; the
        first (the claim) is always kept intact.
        """
        while len(tokens_a) + len(tokens_b) > max_length:
            tokens_b.pop()
        return tokens_a, tokens_b

    def get_tuple_list(self, tuple_list):
        """Flatten nested (subj, _, rel, _, obj) tuples into cleaned text snippets."""
        flat = itertools.chain.from_iterable(tuple_list)
        return [" ".join(self.clean_text(piece) for piece in (tup[0], tup[2], tup[4]))
                for tup in flat]

    @overrides
    def text_to_instance(self,
                         evidence_list,
                         evidence_tuple_list,
                         claim,
                         claim_tuple_list,
                         srl_tuple_list,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Tokenize claim + evidence and emit token ids, attention mask and segment ids."""
        fields: Dict[str, Field] = {}

        evidence_text = self.clean_text("".join(evidence_list))

        claim_tokens = self.bert_servant.tokenize(claim, modify_from_corenlp=True)
        evidence_tokens = self.bert_servant.tokenize(evidence_text, modify_from_corenlp=True)

        # Reserve 3 slots for the specials: two <sep> plus the trailing <cls>.
        claim_tokens, evidence_tokens = self._truncate_seq_pair(
            claim_tokens, evidence_tokens, self.max_l - 3)

        # XLNet layout puts <cls> at the very end of the sequence.
        tokens = claim_tokens + ["<sep>"] + evidence_tokens + ["<sep>", "<cls>"]
        input_mask = [1] * len(tokens)
        token_type_ids = [0] * (len(claim_tokens) + 1) + [1] * (len(evidence_tokens) + 2)

        # Right-pad to the fixed length; pads are masked and typed as segment 1.
        pad_n = max(self.max_l - len(tokens), 0)
        tokens += ["<pad>"] * pad_n
        input_mask += [0] * pad_n
        token_type_ids += [1] * pad_n

        token_ids = self.bert_servant.tokens_to_ids(tokens)
        fields['token_ids'] = BertIndexField(np.asarray(token_ids, dtype=np.int64))
        fields['input_mask'] = BertIndexField(np.asarray(input_mask, dtype=np.int64))
        fields['token_type_ids'] = BertIndexField(np.asarray(token_type_ids, dtype=np.int64))
        if label:
            fields['label'] = LabelField(label, label_namespace='labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)


class XlnetTupleConcatReader(DatasetReader):
    """Reader that concatenates claim and evidence into one XLNet-style sequence.

    Layout: ``<claim tokens> <sep> <evidence tokens> <sep> <cls>``, right-padded
    with ``<pad>`` up to ``max_l``. Evidence tuples are normalized (deduplicated
    by relation), though at present only the raw evidence text feeds the sequence.
    """

    def __init__(self,
                 bert_servant: DebertaServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60, max_evi_node_l=40, shuffle_sentences=False) -> None:
        """max_l is the fixed total length of the concatenated token sequence."""
        super().__init__(lazy=lazy)
        self._example_filter = example_filter
        self.max_l = max_l
        self.max_evi_node_l = max_evi_node_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant: DebertaServant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example (in-memory list, not a file path)."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # Honor the optional example filter.
            if self._example_filter is not None and self._example_filter(example):
                continue

            evidence_list = example['evidence_text_list']
            evidence_tuple_list = example['evidence_tuple_list']
            claim = example["claim"]
            # SRL / claim tuples are currently disabled upstream.
            srl_tuple_list = []
            claim_tuple_list = []

            # Placeholder so the evidence segment is never empty.
            if not evidence_list:
                evidence_list = ["EEMMPPTTYY"]

            pid = str(example['id'])

            yield self.text_to_instance(evidence_list, evidence_tuple_list, claim,
                                        claim_tuple_list, srl_tuple_list, pid, label)

    def clean_text(self, text):
        """Replace PTB-style bracket escapes and tabs with plain characters."""
        # Order matters: the hyphenated forms must be rewritten before the bare ones.
        for src, dst in (('\t', ' '), ('-LRB-', '('), ('-RRB-', ')'),
                         ('LRB', '('), ('RRB', ')'),
                         ('-LSB-', '('), ('-RSB-', ')')):
            text = text.replace(src, dst)
        return text

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Pop tokens off the end of ``tokens_b`` until the pair fits max_length.

        Note: only the second sequence (the evidence) is shortened here; the
        first (the claim) is always kept intact.
        """
        while len(tokens_a) + len(tokens_b) > max_length:
            tokens_b.pop()
        return tokens_a, tokens_b

    def get_tuple_list(self, tuple_list):
        """Flatten nested (subj, _, rel, _, obj) tuples into cleaned snippets,
        keeping only the first tuple seen for each distinct relation."""
        seen_rels = []
        snippets = []
        for tup in itertools.chain.from_iterable(tuple_list):
            parts = [self.clean_text(piece) for piece in (tup[0], tup[2], tup[4])]
            if parts[1] not in seen_rels:
                seen_rels.append(parts[1])
                snippets.append(" ".join(parts))
        return snippets

    @overrides
    def text_to_instance(self,
                         evidence_list,
                         evidence_tuple_list,
                         claim,
                         claim_tuple_list,
                         srl_tuple_list,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Tokenize claim + evidence and emit token ids, attention mask and segment ids."""
        fields: Dict[str, Field] = {}

        evidence_text = self.clean_text("".join(evidence_list))
        # Normalized tuples are computed but not currently fed into the sequence.
        evidence_tuple_list = self.get_tuple_list(evidence_tuple_list)

        claim_tokens = self.bert_servant.tokenize(claim, modify_from_corenlp=True)
        evidence_tokens = self.bert_servant.tokenize(evidence_text, modify_from_corenlp=True)

        # Reserve 3 slots for the specials: two <sep> plus the trailing <cls>.
        claim_tokens, evidence_tokens = self._truncate_seq_pair(
            claim_tokens, evidence_tokens, self.max_l - 3)

        # XLNet layout puts <cls> at the very end of the sequence.
        tokens = claim_tokens + ["<sep>"] + evidence_tokens + ["<sep>", "<cls>"]
        input_mask = [1] * len(tokens)
        token_type_ids = [0] * (len(claim_tokens) + 1) + [1] * (len(evidence_tokens) + 2)

        # Right-pad to the fixed length; pads are masked and typed as segment 1.
        pad_n = max(self.max_l - len(tokens), 0)
        tokens += ["<pad>"] * pad_n
        input_mask += [0] * pad_n
        token_type_ids += [1] * pad_n

        token_ids = self.bert_servant.tokens_to_ids(tokens)
        fields['token_ids'] = BertIndexField(np.asarray(token_ids, dtype=np.int64))
        fields['input_mask'] = BertIndexField(np.asarray(input_mask, dtype=np.int64))
        fields['token_type_ids'] = BertIndexField(np.asarray(token_type_ids, dtype=np.int64))
        if label:
            fields['label'] = LabelField(label, label_namespace='labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)

class AlbertConcatReader(DatasetReader):
    """Reader that concatenates claim and evidence into one ALBERT/BERT-style sequence.

    Layout: ``[CLS] <claim tokens> [SEP] <evidence tokens> [SEP]``, right-padded
    with ``[PAD]`` up to ``max_l``.
    """

    def __init__(self,
                 bert_servant: DebertaServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60, max_evi_node_l=40, shuffle_sentences=False) -> None:
        """max_l is the fixed total length of the concatenated token sequence."""
        super().__init__(lazy=lazy)
        self._example_filter = example_filter
        self.max_l = max_l
        self.max_evi_node_l = max_evi_node_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant: DebertaServant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example (in-memory list, not a file path)."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # Skip examples matched by the configured filter.
            if self._example_filter is not None and self._example_filter(example):
                continue

            evidence_list = example['evidence_text_list']
            evidence_tuple_list = example['evidence_tuple_list']
            claim = example["claim"]
            # SRL / claim tuples are currently disabled upstream.
            srl_tuple_list = []
            claim_tuple_list = []

            # Placeholder so the evidence segment is never empty.
            if len(evidence_list) == 0:
                evidence_list = ["EEMMPPTTYY"]

            pid = str(example['id'])

            yield self.text_to_instance(evidence_list, evidence_tuple_list, claim,
                                        claim_tuple_list, srl_tuple_list, pid, label)

    def clean_text(self, text):
        """Replace PTB-style bracket escapes and tabs with plain characters."""
        # Order matters: the hyphenated forms must be rewritten before the bare ones.
        text = text.replace('\t', ' ')
        text = text.replace('-LRB-', '(')
        text = text.replace('-RRB-', ')')
        text = text.replace('LRB', '(')
        text = text.replace('RRB', ')')
        text = text.replace('-LSB-', '(')
        text = text.replace('-RSB-', ')')
        return text

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Pop tokens off the end of ``tokens_b`` until the pair fits max_length.

        Note: only the second sequence (the evidence) is shortened here; the
        first (the claim) is always kept intact.
        """
        while len(tokens_a) + len(tokens_b) > max_length:
            tokens_b.pop()
        return tokens_a, tokens_b

    def get_tuple_list(self, tuple_list):
        """Flatten nested (subj, _, rel, _, obj) tuples into cleaned text snippets."""
        ori_tuples = [t for temp in tuple_list for t in temp]
        final_tuples = []
        for tup in ori_tuples:
            parts = [self.clean_text(phrase) for phrase in (tup[0], tup[2], tup[4])]
            final_tuples.append(" ".join(parts))
        return final_tuples

    @overrides
    def text_to_instance(self,
                         evidence_list,
                         evidence_tuple_list,
                         claim,
                         claim_tuple_list,
                         srl_tuple_list,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Tokenize claim + evidence and emit token ids, attention mask and segment ids."""
        fields: Dict[str, Field] = {}
        evidence_text = self.clean_text("".join(evidence_list))

        claim_tokens = self.bert_servant.tokenize(claim, modify_from_corenlp=True)
        evidence_tokens = self.bert_servant.tokenize(evidence_text, modify_from_corenlp=True)

        # Reserve 3 slots for [CLS] and the two [SEP] tokens.
        claim_tokens, evidence_tokens = self._truncate_seq_pair(
            claim_tokens, evidence_tokens, self.max_l - 3)
        # BUGFIX: the middle separator was the malformed string "[SEP" (missing
        # closing bracket), which a vocabulary would map to UNK instead of [SEP].
        tokens = ["[CLS]"] + claim_tokens + ["[SEP]"] + evidence_tokens + ["[SEP]"]
        input_mask = [1] * len(tokens)
        # Segment ids: 0 covers [CLS] + claim + middle [SEP]; 1 covers the rest.
        token_type_ids = [0] * (len(claim_tokens) + 2) + [1] * (len(evidence_tokens) + 1)

        # Right-pad to the fixed length; padding positions are masked out.
        while len(tokens) < self.max_l:
            tokens.append("[PAD]")
            input_mask.append(0)
            token_type_ids.append(1)

        token_ids = self.bert_servant.tokens_to_ids(tokens)
        fields['token_ids'] = BertIndexField(np.asarray(token_ids, dtype=np.int64))
        fields['input_mask'] = BertIndexField(np.asarray(input_mask, dtype=np.int64))
        fields['token_type_ids'] = BertIndexField(np.asarray(token_type_ids, dtype=np.int64))
        if label:
            fields['label'] = LabelField(label, label_namespace='labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)

class XlnetGCNReader(DatasetReader):
    """
    Graph-augmented FEVER reader for an XLNet-style encoder.

    Packs each example as ``claim <sep> evidence <sep> <cls>`` (classification
    token last, per the XLNet convention) and builds an evidence graph from
    OpenIE-style ``(subject, _, relation, _, object)`` tuples: every cleaned
    phrase becomes a node, subject/relation/object are chained with edges,
    and extra edges are added by string-overlap heuristics.  Each node is then
    projected back onto word-piece positions as a 0/1 mask over the packed
    sequence.
    """

    def __init__(self,
                 bert_servant:DebertaServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60,max_evi_node_l=40,shuffle_sentences=False) -> None:
        # max_l is the total packed sequence length (claim + evidence +
        # 3 special tokens).  max_evi_node_l is kept for interface
        # compatibility; it is not used by this reader.
        super().__init__(lazy=lazy)
        self._example_filter = example_filter
        self.max_l = max_l
        self.max_evi_node_l = max_evi_node_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant: DebertaServant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example, skipping filtered ones."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # drop examples the optional filter rejects
            if self._example_filter is not None and self._example_filter(example):
                continue

            evidence_list = example['evidence_text_list']
            evidence_tuple_list = example['evidence_tuple_list']
            claim = example["claim"]
            # SRL / claim tuples are currently unused by this reader
            srl_tuple_list = []
            claim_tuple_list = []

            if len(evidence_list) == 0:
                # placeholder so tokenization never sees an empty string
                evidence_list = ["EEMMPPTTYY"]

            pid = str(example['id'])

            yield self.text_to_instance(evidence_list, evidence_tuple_list, claim,
                                        claim_tuple_list, srl_tuple_list, pid, label)

    def clean_text(self, text):
        """Undo corenlp-style bracket escapes (-LRB- etc.) and strip tabs."""
        text = text.replace('\t', ' ')
        text = text.replace('-LRB-', '(')
        text = text.replace('-RRB-', ')')
        text = text.replace('LRB', '(')
        text = text.replace('RRB', ')')
        text = text.replace('-LSB-', '(')
        text = text.replace('-RSB-', ')')
        return text

    def edge_hard_rules(self, a, b):
        """Heuristically decide whether phrases ``a`` and ``b`` should share an edge.

        Linked when one phrase contains the other, or when -- after stopword
        removal -- the two word sets differ by exactly one word on each side.
        Single-word phrases never match.
        """
        if len(a.split(' ')) < 2 or len(b.split(' ')) < 2:
            return False
        if a in b or b in a:
            return True
        a = set(a.lower().split(' ')) - STOPWORDS
        b = set(b.lower().split(' ')) - STOPWORDS
        if len(a) < 2 or len(b) < 2:
            return False
        # exactly one non-shared word on each side
        if len(a - b) == 1 and len(b - a) == 1:
            return True

        return False

    def get_nodes_edges(self, tuple_list, ttype='evidence'):
        """Build phrase nodes and a symmetric 0/1 adjacency matrix.

        ``tuple_list`` holds (sub, _, rel, _, obj) tuples -- nested one level
        deeper for evidence.  Returns ``(nodes, edge_matrix)`` where
        ``nodes[phrase] = {'idx': i}`` and the matrix carries self-loops.
        """
        nodes = {}
        if ttype == "claim":
            tuples = tuple_list
        elif ttype == "evidence":
            tuples = [t for temp in tuple_list for t in temp]
        # collect unique cleaned phrases as nodes
        for tup in tuples:
            for phrase in [tup[0], tup[2], tup[4]]:
                phrase = self.clean_text(phrase)
                if phrase not in nodes:
                    nodes[phrase] = {"idx": len(nodes)}
        edge_matrix = [([0] * len(nodes)) for _ in range(len(nodes))]
        # sub--rel--obj chain plus self-loops
        for tup in tuples:
            sub = self.clean_text(tup[0])
            rel = self.clean_text(tup[2])
            obj = self.clean_text(tup[4])
            edge_matrix[nodes[sub]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[sub]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[obj]['idx']] = 1
            edge_matrix[nodes[obj]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[sub]['idx']][nodes[sub]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[obj]['idx']][nodes[obj]['idx']] = 1
        # extend edges according to hard rules; when two nodes get linked,
        # their neighbourhoods are merged (row-wise union)
        for node1 in nodes:
            for node2 in nodes:
                if node1 == node2:
                    continue
                if edge_matrix[nodes[node1]['idx']][nodes[node2]['idx']] == 0:
                    if self.edge_hard_rules(node1, node2):
                        idx_1 = nodes[node1]['idx']
                        idx_2 = nodes[node2]['idx']
                        edge_matrix[idx_1][idx_2] = 1
                        edge_matrix[idx_2][idx_1] = 1
                        for i in range(len(edge_matrix)):
                            if edge_matrix[idx_1][i] == 1 or edge_matrix[idx_2][i] == 1:
                                edge_matrix[idx_1][i] = 1
                                edge_matrix[idx_2][i] = 1
        return nodes, edge_matrix

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncate ``tokens_b`` in place until the pair fits ``max_length``.

        NOTE(review): unlike the classic BERT helper this always pops from
        tokens_b (the evidence); a claim alone longer than max_length would
        exhaust tokens_b and raise IndexError -- confirm upstream never
        produces such claims.
        """
        while len(tokens_a) + len(tokens_b) > max_length:
            tokens_b.pop()

    def firt_index_list(self, a, b):
        """Return the first index where sub-list ``b`` occurs in ``a``, else -1."""
        text_b = ' '.join(b)
        for i in range(len(a)):
            if i + len(b) > len(a):
                return -1
            if ' '.join(a[i:i + len(b)]) == text_b:
                return i
        return -1

    def get_node_ids_mask(self, nodes, tokens, edge_matrix, begin_idx, max_node_l, ttype="claim"):
        """Project graph nodes onto word-piece positions.

        For every node, locate its word pieces inside ``tokens`` and build a
        0/1 mask of length ``self.max_l`` (offset by ``begin_idx`` so evidence
        masks land after the claim segment).  Nodes that cannot be located
        are fully disconnected from the graph.  Returns
        ``(node_ids, edge_matrix)``; an empty graph is replaced by a fixed
        2-node all-zero graph so downstream tensors are never empty.
        """
        nodes_dc = copy.deepcopy(nodes)
        for node in nodes:
            node = self.clean_text(node)
            start_pos = self.firt_index_list(tokens, self.bert_servant.tokenize(node))
            final_start_pos = begin_idx + start_pos
            node_piece_length = len(self.bert_servant.tokenize(node))
            mask = [0] * self.max_l
            begin, length = -1, -1
            if start_pos != -1:
                max_pos = min(self.max_l, final_start_pos + node_piece_length)
                begin = final_start_pos
                length = node_piece_length
                for i in range(final_start_pos, max_pos):
                    mask[i] = 1
            else:
                # BUG FIX: the original iterated over the row's *values*
                # (0/1 flags) and used them as indices, which only cleared
                # columns/rows 0 and 1.  Iterate over column indices so the
                # unlocatable node is fully disconnected.
                node_idx = nodes[node]['idx']
                for j in range(len(edge_matrix)):
                    edge_matrix[node_idx][j] = 0
                    edge_matrix[j][node_idx] = 0

            nodes_dc[node]['mask'] = mask
            nodes_dc[node]['begin_and_length'] = (begin, length)
        # emit masks ordered by node index
        node_ids = [info['mask'] for _, info in
                    sorted(nodes_dc.items(), key=lambda item: item[1]['idx'])]
        if len(node_ids) == 0:
            node_ids = [[0] * self.max_l for _ in range(2)]
            edge_matrix = [[0, 0], [0, 0]]
        return node_ids, edge_matrix

    @overrides
    def text_to_instance(self,
                         evidence_list,
                         evidence_tuple_list,
                         claim,
                         claim_tuple_list,
                         srl_tuple_list,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Pack claim/evidence into XLNet inputs plus evidence-graph fields."""
        fields: Dict[str, Field] = {}
        evidence_text = " ".join(evidence_list)
        evidence_text = self.clean_text(evidence_text)
        evidence_nodes, evidence_edge_matrix = self.get_nodes_edges(evidence_tuple_list, ttype='evidence')
        # tokenize
        claim_tokens = self.bert_servant.tokenize(claim, modify_from_corenlp=True)
        evidence_tokens = self.bert_servant.tokenize(evidence_text, modify_from_corenlp=True)
        # truncate (in place) so claim + evidence + 3 specials fit max_l
        self._truncate_seq_pair(claim_tokens, evidence_tokens, self.max_l - 3)
        # XLNet layout: segment A = claim + <sep>, segment B = evidence + <sep> <cls>
        tokens = claim_tokens + ["<sep>"] + evidence_tokens + ["<sep>", "<cls>"]
        input_mask = [1] * len(tokens)
        token_type_ids = [0] * (len(claim_tokens) + 1) + [1] * (len(evidence_tokens) + 2)
        # pad to max_l
        while len(tokens) < self.max_l:
            tokens.append("<pad>")
            input_mask.append(0)
            token_type_ids.append(1)
        # evidence node masks start after the claim segment (+1 for <sep>)
        evidence_node_ids, evidence_edge_matrix = self.get_node_ids_mask(
            evidence_nodes, evidence_tokens, evidence_edge_matrix,
            len(claim_tokens) + 1, None, ttype="evidence")
        token_ids = self.bert_servant.tokens_to_ids(tokens)
        # token2field
        fields['token_ids'] = BertIndexField(np.asarray(token_ids, dtype=np.int64))
        fields['input_mask'] = BertIndexField(np.asarray(input_mask, dtype=np.int64))
        fields['token_type_ids'] = BertIndexField(np.asarray(token_type_ids, dtype=np.int64))
        fields['evidence_node_ids'] = BertIndexField(np.asarray(evidence_node_ids, dtype=np.int64))
        fields['evidence_edge_matrix'] = BertIndexField(np.asarray(evidence_edge_matrix, dtype=np.int64))
        if label:
            fields['label'] = LabelField(label, label_namespace='labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)


class XlnetGCNGloveReader(DatasetReader):
    """
    Graph-augmented FEVER reader whose graph nodes are plain token sequences
    (indexable with GloVe-style token indexers) rather than word-piece masks.

    The claim/evidence pair is packed for an XLNet-style encoder
    (``claim <sep> evidence <sep> <cls>``); the evidence graph is padded or
    truncated to a fixed budget of 50 nodes so batches have uniform shape.
    """

    def __init__(self,
                 bert_servant:DebertaServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60,max_evi_node_l=40,shuffle_sentences=False,
                 token_indexers: Dict[str, TokenIndexer] = None) -> None:
        # max_l is the total packed sequence length; token_indexers drive
        # how node tokens are mapped to embedding ids.
        super().__init__(lazy=lazy)
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer(namespace='tokens')}
        self._example_filter = example_filter
        self.max_l = max_l
        self.max_evi_node_l = max_evi_node_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant: DebertaServant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example, skipping filtered ones."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # drop examples the optional filter rejects
            if self._example_filter is not None and self._example_filter(example):
                continue

            evidence_list = example['evidence_text_list']
            evidence_tuple_list = example['evidence_tuple_list']
            claim = example["claim"]
            # SRL / claim tuples are currently unused by this reader
            srl_tuple_list = []
            claim_tuple_list = []

            if len(evidence_list) == 0:
                # placeholder so tokenization never sees an empty string
                evidence_list = ["EEMMPPTTYY"]

            pid = str(example['id'])

            yield self.text_to_instance(evidence_list, evidence_tuple_list, claim,
                                        claim_tuple_list, srl_tuple_list, pid, label)

    def clean_text(self, text):
        """Undo corenlp-style bracket escapes (-LRB- etc.) and strip tabs."""
        text = text.replace('\t', ' ')
        text = text.replace('-LRB-', '(')
        text = text.replace('-RRB-', ')')
        text = text.replace('LRB', '(')
        text = text.replace('RRB', ')')
        text = text.replace('-LSB-', '(')
        text = text.replace('-RSB-', ')')
        return text

    def edge_hard_rules(self, a, b):
        """Heuristically decide whether phrases ``a`` and ``b`` should share an edge.

        Linked when one phrase contains the other, or when -- after stopword
        removal -- the two word sets differ by exactly one word on each side.
        Single-word phrases never match.
        """
        if len(a.split(' ')) < 2 or len(b.split(' ')) < 2:
            return False
        if a in b or b in a:
            return True
        a = set(a.lower().split(' ')) - STOPWORDS
        b = set(b.lower().split(' ')) - STOPWORDS
        if len(a) < 2 or len(b) < 2:
            return False
        # exactly one non-shared word on each side
        if len(a - b) == 1 and len(b - a) == 1:
            return True

        return False

    def get_nodes_edges(self, tuple_list, evidence_list):
        """Build phrase nodes + adjacency, and attach each node's source sentence.

        Same construction as the other GCN readers, plus: every node records
        the first cleaned evidence sentence containing it under
        ``nodes[phrase]['context']`` ("" when none matches).
        """
        nodes = {}
        tuples = [t for temp in tuple_list for t in temp]
        # collect unique cleaned phrases as nodes
        for tup in tuples:
            for phrase in [tup[0], tup[2], tup[4]]:
                phrase = self.clean_text(phrase)
                if phrase not in nodes:
                    nodes[phrase] = {"idx": len(nodes), "context": ""}
        edge_matrix = [([0] * len(nodes)) for _ in range(len(nodes))]
        # sub--rel--obj chain plus self-loops
        for tup in tuples:
            sub = self.clean_text(tup[0])
            rel = self.clean_text(tup[2])
            obj = self.clean_text(tup[4])
            edge_matrix[nodes[sub]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[sub]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[obj]['idx']] = 1
            edge_matrix[nodes[obj]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[sub]['idx']][nodes[sub]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[obj]['idx']][nodes[obj]['idx']] = 1
        # extend edges according to hard rules; when two nodes get linked,
        # their neighbourhoods are merged (row-wise union)
        for node1 in nodes:
            for node2 in nodes:
                if node1 == node2:
                    continue
                if edge_matrix[nodes[node1]['idx']][nodes[node2]['idx']] == 0:
                    if self.edge_hard_rules(node1, node2):
                        idx_1 = nodes[node1]['idx']
                        idx_2 = nodes[node2]['idx']
                        edge_matrix[idx_1][idx_2] = 1
                        edge_matrix[idx_2][idx_1] = 1
                        for i in range(len(edge_matrix)):
                            if edge_matrix[idx_1][i] == 1 or edge_matrix[idx_2][i] == 1:
                                edge_matrix[idx_1][i] = 1
                                edge_matrix[idx_2][i] = 1
        # attach the first evidence sentence containing each node, if any
        for node in nodes:
            for evi in evidence_list:
                if node in self.clean_text(evi):
                    nodes[node]['context'] = self.clean_text(evi)
                    break
        return nodes, edge_matrix

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncate ``tokens_b`` in place until the pair fits; returns both lists.

        NOTE(review): always pops from tokens_b (the evidence), never the claim.
        """
        while len(tokens_a) + len(tokens_b) > max_length:
            tokens_b.pop()
        return tokens_a, tokens_b

    def truncate_tokens(self, tokens_a, max_length):
        """Truncate a single token list in place to ``max_length``; returns it."""
        while len(tokens_a) > max_length:
            tokens_a.pop()
        return tokens_a

    @overrides
    def text_to_instance(self,
                         evidence_list,
                         evidence_tuple_list,
                         claim,
                         claim_tuple_list,
                         srl_tuple_list,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Pack claim/evidence XLNet inputs plus a fixed-size node-token graph."""
        fields: Dict[str, Field] = {}
        evidence_text = "".join(evidence_list)
        evidence_text = self.clean_text(evidence_text)
        # tokenize and truncate so claim + evidence + 3 specials fit max_l
        claim_tokens = self.bert_servant.tokenize(claim, modify_from_corenlp=True)
        evidence_tokens = self.bert_servant.tokenize(evidence_text, modify_from_corenlp=True)
        claim_tokens, evidence_tokens = self._truncate_seq_pair(claim_tokens, evidence_tokens, self.max_l - 3)
        # XLNet layout: segment A = claim + <sep>, segment B = evidence + <sep> <cls>
        tokens = claim_tokens + ["<sep>"] + evidence_tokens + ["<sep>", "<cls>"]
        input_mask = [1] * len(tokens)
        token_type_ids = [0] * (len(claim_tokens) + 1) + [1] * (len(evidence_tokens) + 2)
        # pad to max_l
        while len(tokens) < self.max_l:
            tokens.append("<pad>")
            input_mask.append(0)
            token_type_ids.append(1)
        token_ids = self.bert_servant.tokens_to_ids(tokens)
        # graph: one whitespace-split token sequence per node
        evidence_nodes, evidence_edge_matrix = self.get_nodes_edges(evidence_tuple_list, evidence_list)
        evidence_node_tokens = []
        evidence_node_mask = []
        max_node_num = 50  # fixed node budget so batches have uniform shape
        for node in evidence_nodes.keys():
            # only the node text is used; the sentence context collected by
            # get_nodes_edges is currently not appended
            evidence_node_tokens.append([Token(t) for t in node.split(" ")])
            evidence_node_mask.append(1)

        if len(evidence_node_tokens) == 0:
            # empty graph: placeholder nodes, zero mask and edges.
            # BUG FIX: build independent rows -- the original
            # [[0]*max_node_num]*max_node_num aliased one row 50 times.
            evidence_node_tokens = [[Token("EEMMPPTTYY")] for _ in range(max_node_num)]
            evidence_edge_matrix = [[0] * max_node_num for _ in range(max_node_num)]
            evidence_node_mask = [0] * max_node_num
        elif len(evidence_node_tokens) > max_node_num:
            # truncate nodes plus the corresponding top-left edge sub-matrix
            evidence_node_tokens = evidence_node_tokens[:max_node_num]
            new_edge = [([0] * max_node_num) for _ in range(max_node_num)]
            for i in range(max_node_num):
                for j in range(max_node_num):
                    new_edge[i][j] = evidence_edge_matrix[i][j]
            evidence_edge_matrix = new_edge
            evidence_node_mask = evidence_node_mask[:max_node_num]
        elif len(evidence_node_tokens) < max_node_num:
            # pad with placeholder nodes; copy existing edges into the
            # enlarged all-zero matrix
            ori_len = len(evidence_node_tokens)
            while len(evidence_node_tokens) < max_node_num:
                evidence_node_tokens.append([Token("EEMMPPTTYY")])
            new_edge = [([0] * max_node_num) for _ in range(max_node_num)]
            for i in range(ori_len):
                for j in range(ori_len):
                    new_edge[i][j] = evidence_edge_matrix[i][j]
            evidence_edge_matrix = new_edge
            while len(evidence_node_mask) < max_node_num:
                evidence_node_mask.append(0)
        # token2field
        fields['token_ids'] = BertIndexField(np.asarray(token_ids, dtype=np.int64))
        fields['input_mask'] = BertIndexField(np.asarray(input_mask, dtype=np.int64))
        fields['token_type_ids'] = BertIndexField(np.asarray(token_type_ids, dtype=np.int64))
        fields['evidence_node_tokens'] = ListField([TextField(x, self._token_indexers) for x in evidence_node_tokens])
        fields['evidence_node_mask'] = BertIndexField(np.asarray(evidence_node_mask, dtype=np.int64))
        fields['evidence_edge_matrix'] = BertIndexField(np.asarray(evidence_edge_matrix, dtype=np.int64))
        if label:
            fields['label'] = LabelField(label, label_namespace='labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)


class XlnetGCNXlnetReader(DatasetReader):
    """
    Graph-augmented FEVER reader where node representations are word-piece
    masks over the XLNet-packed sequence (``claim <sep> evidence <sep> <cls>``).

    Evidence tuples ``(subject, _, relation, _, object)`` become graph nodes
    and edges; each node is located inside the evidence word pieces and turned
    into a 0/1 position mask of length ``max_l``.
    """

    def __init__(self,
                 bert_servant:DebertaServant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60,max_evi_node_l=40,shuffle_sentences=False,
                 token_indexers: Dict[str, TokenIndexer] = None) -> None:
        # max_l is the total packed sequence length (claim + evidence +
        # 3 special tokens).
        super().__init__(lazy=lazy)
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer(namespace='tokens')}
        self._example_filter = example_filter
        self.max_l = max_l
        self.max_evi_node_l = max_evi_node_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant: DebertaServant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example, skipping filtered ones."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # drop examples the optional filter rejects
            if self._example_filter is not None and self._example_filter(example):
                continue

            evidence_list = example['evidence_text_list']
            evidence_tuple_list = example['evidence_tuple_list']
            claim = example["claim"]
            # SRL / claim tuples are currently unused by this reader
            srl_tuple_list = []
            claim_tuple_list = []

            if len(evidence_list) == 0:
                # placeholder so tokenization never sees an empty string
                evidence_list = ["EEMMPPTTYY"]

            pid = str(example['id'])

            yield self.text_to_instance(evidence_list, evidence_tuple_list, claim,
                                        claim_tuple_list, srl_tuple_list, pid, label)

    def clean_text(self, text):
        """Undo corenlp-style bracket escapes (-LRB- etc.) and strip tabs."""
        text = text.replace('\t', ' ')
        text = text.replace('-LRB-', '(')
        text = text.replace('-RRB-', ')')
        text = text.replace('LRB', '(')
        text = text.replace('RRB', ')')
        text = text.replace('-LSB-', '(')
        text = text.replace('-RSB-', ')')
        return text

    def edge_hard_rules(self, a, b):
        """Heuristically decide whether phrases ``a`` and ``b`` should share an edge.

        Linked when one phrase contains the other, or when -- after stopword
        removal -- the two word sets differ by exactly one word on each side.
        Single-word phrases never match.
        """
        if len(a.split(' ')) < 2 or len(b.split(' ')) < 2:
            return False
        if a in b or b in a:
            return True
        a = set(a.lower().split(' ')) - STOPWORDS
        b = set(b.lower().split(' ')) - STOPWORDS
        if len(a) < 2 or len(b) < 2:
            return False
        # exactly one non-shared word on each side
        if len(a - b) == 1 and len(b - a) == 1:
            return True

        return False

    def get_nodes_edges(self, tuple_list, ttype='evidence'):
        """Build phrase nodes and a symmetric 0/1 adjacency matrix.

        ``tuple_list`` holds (sub, _, rel, _, obj) tuples -- nested one level
        deeper for evidence.  Returns ``(nodes, edge_matrix)`` where
        ``nodes[phrase] = {'idx': i}`` and the matrix carries self-loops.
        """
        nodes = {}
        if ttype == "claim":
            tuples = tuple_list
        elif ttype == "evidence":
            tuples = [t for temp in tuple_list for t in temp]
        # collect unique cleaned phrases as nodes
        for tup in tuples:
            for phrase in [tup[0], tup[2], tup[4]]:
                phrase = self.clean_text(phrase)
                if phrase not in nodes:
                    nodes[phrase] = {"idx": len(nodes)}
        edge_matrix = [([0] * len(nodes)) for _ in range(len(nodes))]
        # sub--rel--obj chain plus self-loops
        for tup in tuples:
            sub = self.clean_text(tup[0])
            rel = self.clean_text(tup[2])
            obj = self.clean_text(tup[4])
            edge_matrix[nodes[sub]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[sub]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[obj]['idx']] = 1
            edge_matrix[nodes[obj]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[sub]['idx']][nodes[sub]['idx']] = 1
            edge_matrix[nodes[rel]['idx']][nodes[rel]['idx']] = 1
            edge_matrix[nodes[obj]['idx']][nodes[obj]['idx']] = 1
        # extend edges according to hard rules; when two nodes get linked,
        # their neighbourhoods are merged (row-wise union)
        for node1 in nodes:
            for node2 in nodes:
                if node1 == node2:
                    continue
                if edge_matrix[nodes[node1]['idx']][nodes[node2]['idx']] == 0:
                    if self.edge_hard_rules(node1, node2):
                        idx_1 = nodes[node1]['idx']
                        idx_2 = nodes[node2]['idx']
                        edge_matrix[idx_1][idx_2] = 1
                        edge_matrix[idx_2][idx_1] = 1
                        for i in range(len(edge_matrix)):
                            if edge_matrix[idx_1][i] == 1 or edge_matrix[idx_2][i] == 1:
                                edge_matrix[idx_1][i] = 1
                                edge_matrix[idx_2][i] = 1
        return nodes, edge_matrix

    def get_node_ids_mask(self, nodes, tokens, edge_matrix, begin_idx):
        """Project graph nodes onto word-piece positions.

        Like the other GCN readers: each node gets a 0/1 mask of length
        ``self.max_l`` marking its word pieces (offset by ``begin_idx``);
        unlocatable nodes are fully disconnected.  Per-node debug prints that
        fired on every instance have been removed from the original.
        """
        nodes_dc = copy.deepcopy(nodes)
        mask = [0] * self.max_l
        for node in nodes:
            node = self.clean_text(node)
            start_pos = self.firt_index_list(tokens, self.bert_servant.tokenize(node))
            final_start_pos = begin_idx + start_pos
            node_piece_length = len(self.bert_servant.tokenize(node))
            tmp_mask = copy.deepcopy(mask)
            begin, length = -1, -1
            if start_pos != -1:
                max_pos = min(self.max_l, final_start_pos + node_piece_length)
                begin = final_start_pos
                length = node_piece_length
                tmp_mask[final_start_pos:max_pos] = [1] * (max_pos - final_start_pos)
            else:
                # BUG FIX: the original iterated over the row's *values*
                # (0/1 flags) and used them as indices, which only cleared
                # columns/rows 0 and 1.  Iterate over column indices so the
                # unlocatable node is fully disconnected.
                node_idx = nodes[node]['idx']
                for j in range(len(edge_matrix)):
                    edge_matrix[node_idx][j] = 0
                    edge_matrix[j][node_idx] = 0
            nodes_dc[node]['mask'] = tmp_mask
            nodes_dc[node]['begin_and_length'] = (begin, length)
        # emit masks ordered by node index
        node_ids = [info['mask'] for _, info in
                    sorted(nodes_dc.items(), key=lambda item: item[1]['idx'])]
        if len(node_ids) == 0:
            # empty graph fallback: fixed 2-node all-zero graph
            node_ids = [[0] * self.max_l for _ in range(2)]
            edge_matrix = [[0, 0], [0, 0]]
        return node_ids, edge_matrix

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncate ``tokens_b`` in place until the pair fits ``max_length``.

        NOTE(review): always pops from tokens_b (the evidence), never the claim.
        """
        while len(tokens_a) + len(tokens_b) > max_length:
            tokens_b.pop()

    def firt_index_list(self, a, b):
        """Return the first index where sub-list ``b`` occurs in ``a``, else -1."""
        text_b = ' '.join(b)
        for i in range(len(a)):
            if i + len(b) > len(a):
                return -1
            if ' '.join(a[i:i + len(b)]) == text_b:
                return i
        return -1

    def get_glove_node_ids_mask(self, nodes, tokens, edge_matrix, begin_idx, max_node_l, ttype="claim"):
        """Variant of get_node_ids_mask with the extra (unused) max_node_l/ttype args.

        Kept for interface compatibility; not called by text_to_instance here.
        """
        nodes_dc = copy.deepcopy(nodes)
        for node in nodes:
            node = self.clean_text(node)
            start_pos = self.firt_index_list(tokens, self.bert_servant.tokenize(node))
            final_start_pos = begin_idx + start_pos
            node_piece_length = len(self.bert_servant.tokenize(node))
            mask = [0] * self.max_l
            begin, length = -1, -1
            if start_pos != -1:
                max_pos = min(self.max_l, final_start_pos + node_piece_length)
                begin = final_start_pos
                length = node_piece_length
                for i in range(final_start_pos, max_pos):
                    mask[i] = 1
            else:
                # BUG FIX: iterate over column indices (the original used the
                # row's 0/1 values as indices and only cleared columns 0/1)
                node_idx = nodes[node]['idx']
                for j in range(len(edge_matrix)):
                    edge_matrix[node_idx][j] = 0
                    edge_matrix[j][node_idx] = 0

            nodes_dc[node]['mask'] = mask
            nodes_dc[node]['begin_and_length'] = (begin, length)
        node_ids = [info['mask'] for _, info in
                    sorted(nodes_dc.items(), key=lambda item: item[1]['idx'])]
        if len(node_ids) == 0:
            node_ids = [[0] * self.max_l for _ in range(2)]
            edge_matrix = [[0, 0], [0, 0]]
        return node_ids, edge_matrix

    @overrides
    def text_to_instance(self,
                         evidence_list,
                         evidence_tuple_list,
                         claim,
                         claim_tuple_list,
                         srl_tuple_list,
                         pid: str = None,
                         label: str = None) -> Instance:
        """Pack claim/evidence into XLNet inputs plus evidence-graph fields."""
        fields: Dict[str, Field] = {}
        evidence_text = " ".join(evidence_list)
        evidence_text = self.clean_text(evidence_text)
        evidence_nodes, evidence_edge_matrix = self.get_nodes_edges(evidence_tuple_list, ttype='evidence')
        # tokenize
        claim_tokens = self.bert_servant.tokenize(claim, modify_from_corenlp=True)
        evidence_tokens = self.bert_servant.tokenize(evidence_text, modify_from_corenlp=True)
        # truncate (in place) so claim + evidence + 3 specials fit max_l
        self._truncate_seq_pair(claim_tokens, evidence_tokens, self.max_l - 3)
        # XLNet layout: segment A = claim + <sep>, segment B = evidence + <sep> <cls>
        tokens = claim_tokens + ["<sep>"] + evidence_tokens + ["<sep>", "<cls>"]
        input_mask = [1] * len(tokens)
        token_type_ids = [0] * (len(claim_tokens) + 1) + [1] * (len(evidence_tokens) + 2)
        # pad to max_l
        while len(tokens) < self.max_l:
            tokens.append("<pad>")
            input_mask.append(0)
            token_type_ids.append(1)
        token_ids = self.bert_servant.tokens_to_ids(tokens)
        # evidence node masks start after the claim segment (+1 for <sep>);
        # the per-instance debug print of the edge matrix has been removed
        evidence_node_ids, evidence_edge_matrix = self.get_node_ids_mask(
            evidence_nodes, evidence_tokens, evidence_edge_matrix, len(claim_tokens) + 1)
        # token2field
        fields['token_ids'] = BertIndexField(np.asarray(token_ids, dtype=np.int64))
        fields['input_mask'] = BertIndexField(np.asarray(input_mask, dtype=np.int64))
        fields['token_type_ids'] = BertIndexField(np.asarray(token_type_ids, dtype=np.int64))
        fields['evidence_node_ids'] = BertIndexField(np.asarray(evidence_node_ids, dtype=np.int64))
        fields['evidence_edge_matrix'] = BertIndexField(np.asarray(evidence_edge_matrix, dtype=np.int64))
        if label:
            fields['label'] = LabelField(label, label_namespace='labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)


class AlbertNLIReader(DatasetReader):
    """
    NLI-style reader that pairs a claim (hypothesis) with its concatenated
    evidence text (premise) and encodes the pair as one sequence:

        [CLS] hypothesis [SEP] premise [SEP]  (+ [PAD] up to ``max_l``)

    Produces instances with ``paired_sequence``, ``paired_token_type_ids``,
    ``paired_input_mask`` and optional ``label`` / ``pid`` fields.
    """

    def __init__(self,
                 bert_servant,
                 lazy: bool = False,
                 example_filter=None,
                 max_l=60, shuffle_sentences=False) -> None:
        # max_l is the maximum length of the full paired sequence,
        # INCLUDING the three special tokens ([CLS] and two [SEP]).
        super().__init__(lazy=lazy)
        self._example_filter = example_filter
        self.max_l = max_l
        self.shuffle_sentences = shuffle_sentences
        self.bert_servant = bert_servant

    @overrides
    def _read(self, data_list):
        """Yield one Instance per upstream example, skipping filtered ones."""
        logger.info("Reading Fever instances from upstream sampler")
        for example in data_list:
            label = example["label"]

            # Skip examples rejected by the optional filter predicate.
            if self._example_filter is not None and self._example_filter(example):
                continue

            # BUGFIX: join evidence sentences with a space so that words at
            # sentence boundaries are not fused together (consistent with
            # BertReader, which uses " ".join on the evidence list).
            premise = " ".join(example['evidence_text_list'])
            hypothesis = example["claim"]

            # Placeholder token so the premise is never an empty string.
            if premise == "":
                premise = "EEMMPPTTYY"

            pid = str(example['id'])

            yield self.text_to_instance(premise, hypothesis, pid, label)

    @overrides
    def text_to_instance(self,  # type: ignore
                         premise: str,  # fixed annotation: _read passes a plain string
                         hypothesis: str,
                         pid: str = None,
                         label: str = None) -> Instance:
        """
        Tokenize, truncate, pad and index a (premise, hypothesis) pair.

        Returns an Instance whose ``paired_sequence`` is exactly ``max_l``
        token ids when truncation/padding applies, with padding positions
        zeroed in ``paired_input_mask``.
        """
        fields: Dict[str, Field] = {}

        premise_tokens = self.bert_servant.tokenize(premise, modify_from_corenlp=True)
        hypothesis_tokens = self.bert_servant.tokenize(hypothesis, modify_from_corenlp=True)

        if self.max_l is not None:
            # Reserve 3 positions for [CLS] and the two [SEP] tokens.
            # Keep the hypothesis intact when possible and truncate the
            # premise down to the remaining budget.
            hypothesis_tokens = hypothesis_tokens[:self.max_l - 3]
            premise_budget = self.max_l - 3 - len(hypothesis_tokens)
            if len(premise_tokens) > premise_budget:
                # BUGFIX: the original sliced to ``max_l - diff`` where
                # diff = p_l + h_l - max_l + 3, which keeps
                # 2*max_l - p_l - h_l - 3 tokens and leaves the paired
                # sequence LONGER than max_l whenever truncation triggers.
                # Slice to the actual premise budget instead.
                premise_tokens = premise_tokens[:max(premise_budget, 0)]

        paired_tokens = ["[CLS]"] + hypothesis_tokens + ["[SEP]"] + premise_tokens + ["[SEP]"]
        paired_input_mask = [1] * len(paired_tokens)
        # Segment ids: 0 for [CLS] + hypothesis + first [SEP]; 1 for the rest.
        paired_token_type_ids = [0] * (2 + len(hypothesis_tokens)) + [1] * (1 + len(premise_tokens))

        # Right-pad up to max_l; padded positions are masked out.
        while len(paired_tokens) < self.max_l:
            paired_tokens.append("[PAD]")
            paired_input_mask.append(0)
            paired_token_type_ids.append(0)

        paired_token_ids = self.bert_servant.tokens_to_ids(paired_tokens)

        fields['paired_sequence'] = BertIndexField(np.asarray(paired_token_ids, dtype=np.int64))
        fields['paired_token_type_ids'] = BertIndexField(np.asarray(paired_token_type_ids, dtype=np.int64))
        fields['paired_input_mask'] = BertIndexField(np.asarray(paired_input_mask, dtype=np.int64))
        if label:
            fields['label'] = LabelField(label, label_namespace='labels')
        if pid:
            fields['pid'] = IdField(pid)

        return Instance(fields)