import ast
import re

from constant import *
from utils.instance import SentenceInstance


class RawData:
    """Load an ASTE-style dataset file and build per-sentence instances.

    Each line of the file looks like::

        I charge it at night ... battery life .####[([16, 17], [15], 'POS')]

    i.e. a sentence, the separator ``####``, then a Python-literal list of
    ``(aspect_token_indices, opinion_token_indices, sentiment_str)`` triplets.
    The constructor builds ``self.data`` (one ``SentenceInstance`` per line)
    plus several string-keyed lookup sets used for evaluation.
    """

    def __init__(self, data_dir, tokenizer, args):
        # NOTE(review): `args` is accepted for interface compatibility but is
        # not used inside this constructor.
        self.n_lines_in_file = 0  # number of lines (sentence + labels) in the file
        self.n_multi_aspect_span = 0  # number of samples containing more than one aspect
        self.n_label = 0  # total number of (aspect, opinion, sentiment) triplets in the file
        self.n_multi_aspect_label = 0  # number of triplets that belong to multi-aspect samples

        self.data = []  # core content: one SentenceInstance per input line
        self.multi_aspect_id = []  # line indices of the multi-aspect samples
        self.data_dir = data_dir

        # Evaluation lookup structures. The three triplet sets hold
        # "i-al-ar-ol-or-s" strings (i = line index, al/ar = aspect span,
        # ol/or = opinion span, s = sentiment id).
        self.all_triplets, self.all_multi_triplets, self.all_single_triplets = set(), set(), set()
        self.i_as_ae_set, self.i_os_oe_set, self.i_al_ar_ol_or_set, self.i_al_ar_s_set = set(), set(), set(), set()
        self.i2al_ar_ol_or_s_dict = dict()

        # Build self.data. `with` guarantees the file handle is closed
        # (the original left it open).
        with open(data_dir) as data_lines:
            for i, text in enumerate(data_lines):
                self.n_lines_in_file += 1
                current_sentence = re.search(r'.+(?=####)', text)[0]
                # The label part after '####' is a pure Python literal, so
                # ast.literal_eval is a safe drop-in replacement for eval().
                labels = ast.literal_eval(re.search(r'(?<=####).+', text)[0])

                current_triplet_list = []
                aspect_span_list, opinion_span_list, sentiment_id_list = [], [], []

                # Group opinions (and one sentiment) under each distinct aspect span.
                for aspect_spans, opinion_spans, sentiment_str in labels:
                    # sentiment_str is e.g. "POS", "NEG", "NEU", "0", "Invalid".
                    a_span = [aspect_spans[0], aspect_spans[-1]]
                    o_span = [opinion_spans[0], opinion_spans[-1]]
                    if a_span in aspect_span_list:  # aspect seen before on this line
                        index = aspect_span_list.index(a_span)
                        opinion_span_list[index].append(o_span)
                        if sentiment2id[sentiment_str] != sentiment_id_list[index]:
                            # One aspect with different opinions carrying
                            # conflicting sentiments; keep the first sentiment.
                            print('inconsistent sentiment')
                    else:  # first time this aspect appears
                        aspect_span_list.append(a_span)
                        opinion_span_list.append([o_span])
                        sentiment_id_list.append(sentiment2id[sentiment_str])
                    # Update the global triplet counter.
                    self.n_label += 1

                # Record this sample if it has multiple aspects.
                if len(aspect_span_list) > 1:
                    self.multi_aspect_id.append(i)
                    self.n_multi_aspect_span += 1
                    self.n_multi_aspect_label += len(labels)

                # Update the evaluation sets.
                for a_span, o_span, sentiment_str in labels:
                    str_i = str(i)
                    sentiment_id_str = str(sentiment2id[sentiment_str])
                    # all_triplets / all_multi_triplets / all_single_triplets
                    i_as_ae_os_oe_s_str = "-".join([str_i, str(a_span[0]), str(a_span[-1]), str(o_span[0]), str(o_span[-1]), sentiment_id_str])
                    self.all_triplets.add(i_as_ae_os_oe_s_str)
                    if len(aspect_span_list) > 1:
                        self.all_multi_triplets.add(i_as_ae_os_oe_s_str)
                    else:
                        self.all_single_triplets.add(i_as_ae_os_oe_s_str)
                    # Partial-key sets used for component-wise evaluation.
                    self.i_as_ae_set.add("-".join([str_i, str(a_span[0]), str(a_span[-1])]))
                    self.i_os_oe_set.add("-".join([str_i, str(o_span[0]), str(o_span[-1])]))
                    self.i_al_ar_ol_or_set.add("-".join([str_i, str(a_span[0]), str(a_span[-1]), str(o_span[0]), str(o_span[-1])]))
                    self.i_al_ar_s_set.add("-".join([str_i, str(a_span[0]), str(a_span[-1]), sentiment_id_str]))
                    # Per-line triplet strings; setdefault replaces the
                    # explicit __contains__/else branching.
                    al_ar_ol_or_s_str = "-".join([str(a_span[0]), str(a_span[-1]), str(o_span[0]), str(o_span[-1]), sentiment_id_str])
                    self.i2al_ar_ol_or_s_dict.setdefault(str_i, []).append(al_ar_ol_or_s_str)

                # Build the real triplets:
                # ([a_s, a_e], [[o1_s, o1_e], [o2_s, o2_e], ...], sentiment_id)
                for a_sp, o_sps, s_id in zip(aspect_span_list, opinion_span_list, sentiment_id_list):
                    current_triplet_list.append((a_sp, o_sps, s_id))

                # Store the finished instance.
                self.data.append(SentenceInstance(current_sentence, current_triplet_list, tokenizer))

        print(f"RawData in {data_dir} __init__() finished!")

    def __iter__(self):
        # Each yielded object is a SentenceInstance.
        yield from self.data

    def __getitem__(self, index) -> SentenceInstance:
        return self.data[index]

    # 'i-al-ar' set
    def get_all_aspect_set(self) -> set:
        return self.i_as_ae_set

    # 'i-ol-or' set
    def get_all_opinion_set(self) -> set:
        return self.i_os_oe_set

    # 'i-al-ar-ol-or' set
    def get_all_pair_set(self) -> set:
        return self.i_al_ar_ol_or_set

    # 'i-al-ar-s' set
    # where i = line index, al = aspect_left, ar = aspect_right, s = sentiment
    def get_all_as_set(self) -> set:
        return self.i_al_ar_s_set

    # dict {'i': ['al-ar-ol-or-s', ...]}
    def get_triplets_dict(self) -> dict:
        return self.i2al_ar_ol_or_s_dict

    def __len__(self):
        return len(self.data)


def generate_test_data_from_raw_data(original_data: RawData):
    """Bundle the evaluation structures of *original_data* into a list.

    Returns, in order: aspect set, opinion set, a 4-tuple of triplet
    sets plus the multi-aspect ids, pair set, aspect-sentiment set,
    multi-aspect ids, and the per-line triplet dict.
    """
    triplet_bundle = (
        original_data.all_triplets,         # 'i-al-ar-ol-or-s' set
        original_data.all_multi_triplets,   # 'i-al-ar-ol-or-s' set
        original_data.all_single_triplets,  # 'i-al-ar-ol-or-s' set
        original_data.multi_aspect_id,      # id list
    )
    return [
        original_data.get_all_aspect_set(),   # 'i-al-ar' set
        original_data.get_all_opinion_set(),  # 'i-ol-or' set
        triplet_bundle,
        original_data.get_all_pair_set(),     # 'i-al-ar-ol-or' set
        original_data.get_all_as_set(),       # 'i-al-ar-s' set
        original_data.multi_aspect_id,        # id list
        original_data.get_triplets_dict(),    # {'i': ['al-ar-ol-or-s', ...]} dict
    ]
