import copy
from dataclasses import asdict

from apps.task.parseabs import AncientBooksAbs
from apps.task.re_pattern import RePattern
from apps.task.works_one_parse import WorksOneParse, set_c_order


class ParseWorks(AncientBooksAbs):
    """Parse multi-record ancient-book text into structured per-work data.

    Expected call order:
        set_inputs() -> set_file_info() -> data_parse() -> get_parse_result()
    Input records are delimited by lines starting with the fullwidth
    letter 'Ａ' (author line), followed by 'Ｃ' chapter lines.
    """

    def __init__(self):
        self.inputs = None            # raw input text (str), set via set_inputs()
        self.one_datas = None         # stripped lines of the record currently being parsed
        self._result = None           # parse result of the most recent record
        self._result_list = []        # accumulated results across all records
        self.wop = WorksOneParse()    # per-record parser; recreated for every record
        self._finish_deal_len = 0     # total number of lines already processed
        self._latest_len = 0          # line count of the most recently parsed record
        # File bookkeeping, populated by set_file_info(); initialized here so
        # data_parse()/get_count_in_file() fail cleanly instead of raising
        # AttributeError when set_file_info() was never called.
        self.file_list_count = None
        self.file_count = 0

    def set_inputs(self, inputs: str):
        """Store a new batch of input text (may contain several records).

        Each call marks the start of fresh data, so the global C_N_ORDER
        counter is reset to 0.

        :param inputs: full input text as a single string
        """
        # reset the module-global chapter-order counter
        set_c_order()
        self.inputs = inputs

    def set_file_info(self, file_list_count):
        """Record per-file line counts for multi-file input.

        Single-file input does not need this bookkeeping.

        :param file_list_count: sequence of (line_count, file_name) pairs
        """
        self.file_list_count = file_list_count
        self.file_count = len(self.file_list_count)

    def _locate_in_files(self, now_count):
        """Map a global line offset onto (zero-based line, file name).

        Walks the per-file line counts, subtracting whole files until the
        offset falls inside one. Returns None when the offset lies past the
        end of every file.
        """
        for line_num, file_name in self.file_list_count:
            if now_count > line_num:
                now_count = now_count - line_num
            else:
                # offset falls inside this file (== line_num marks its last line)
                return now_count - 1, file_name
        return None

    def get_count_in_file(self, count):
        """Return (zero-based line number, file name) for a global line count.

        First tries the raw count. If it is past the end of all files, the
        error probably occurred after data_parse() finished a record, so the
        lookup is retried with self._latest_len subtracted.

        :param count: 1-based global line count across all files
        :raises Exception: when the count cannot be mapped onto any file
        """
        located = self._locate_in_files(count)
        if located is not None:
            return located
        located = self._locate_in_files(count - self._latest_len)
        if located is not None:
            return located
        raise Exception("文件量和传入的数据不对")

    def get_list_data(self):
        """Split the stored input text into a list of lines.

        :return: list of lines (split on "\\n")
        :raises AssertionError: when set_inputs() was not called with a str
        """
        # Explicit raise instead of `assert` so the check survives `python -O`.
        if not isinstance(self.inputs, str):
            raise AssertionError("输入数据不能为空或不为str的数据")
        return self.inputs.split("\n")

    def get_one_data(self):
        """Yield one record (list of raw lines) per author block.

        A fullwidth 'Ａ' line opens a new block. If the first 'Ｃ' line of a
        block carries a chapter number >= 2, the block is treated as the
        continuation of the previous author's work and merged back into it.
        A trailing page-id line is moved to the head of the following block.
        """
        list_data = self.get_list_data()
        print(f"共计行: {len(list_data)}")
        # Drop BOM-only lines and strip a leading BOM from the remaining ones.
        new_list = [data[len("\ufeff"):] if data.startswith("\ufeff") else data for data in list_data if
                    data != "\ufeff"]

        sublists = []
        current_sublist = []
        is_start = False
        is_first_c = True
        for item in new_list:
            item = item.strip()
            if item.startswith('Ａ'):
                # fullwidth 'Ａ' starts a new author block
                current_sublist = [item]
                sublists.append(current_sublist)
                is_start = True
                is_first_c = True
            elif is_start and item.startswith('Ｃ'):
                if is_first_c:
                    current_sublist.append(item)
                    is_first_c = False
                    # NOTE(review): lexicographic string comparison — if the data
                    # uses fullwidth digits (Ｕ+FF10..) this is always true;
                    # presumably chapter ids use ASCII digits here. Confirm.
                    if item.replace('Ｃ', "") >= "2":
                        # first chapter number >= 2: merge this block back into
                        # the previous author's record
                        # NOTE(review): assumes a previous record exists
                        # (sublists non-empty after the pop) — confirm input.
                        temp_lists = sublists.pop()
                        current_sublist = sublists[-1]
                        current_sublist.extend(temp_lists)
                else:
                    current_sublist.append(item)
            else:
                # ordinary content line: append to the current block
                current_sublist.append(item)
        new_sublists = []
        data = None
        print(f"共计作者数量为：{len(sublists)}")
        # A block ending with a page id really belongs to the next block:
        # carry that line over and prepend it there.
        for sublist in sublists:
            if RePattern.is_page_id(sublist[-1]):
                data = sublist[-1]
                new_sublists.append(sublist[:-1])
            else:
                if data:
                    sublist.insert(0, data)
                    data = None
                    new_sublists.append(sublist)
                else:
                    new_sublists.append(sublist)

        for sublist in new_sublists:
            print(f"该作者行数为：{len(sublist)}")
            yield sublist

    def data_parse(self, input_dicts, *args, **kwargs):
        """Parse every record in the stored input and accumulate results.

        :param input_dicts: extra fields merged into each parsed record by
            WorksOneParse.deal_c1_list
        :raises Exception: when a record does not start with an author line
        """
        # a new batch of files starts: reset the processed-lines counter
        self._finish_deal_len = 0
        for list_data in self.get_one_data():
            if not RePattern.is_author_id(list_data[0]):
                # tolerate one leading non-author line (e.g. a carried-over page id)
                if len(list_data) < 2 or not RePattern.is_author_id(list_data[1]):
                    raise Exception("第一行没有作者，错误...")
            my_list = [s.strip() for s in list_data if s.strip()]  # drop blank lines
            self.one_datas = my_list
            # fresh parser per record so no state leaks between records
            self.wop = WorksOneParse()
            self.wop.other_info = {
                "file_count": self.file_count,
                "file_list_count": self.file_list_count
            }
            self.wop.run_parse(my_list)
            self._result = self.wop.deal_c1_list(input_dicts)
            self._result_list.extend(self._result)
            self._latest_len = len(list_data)
            self._finish_deal_len = self._finish_deal_len + self._latest_len

    def get_parse_result(self):
        """Flatten all accumulated results into a single result dict.

        Uses the first c1 record as the template dict, replaces its
        'title_info' with the concatenation of every record's title_info,
        and adds 'title_info_c1group' (chapter ids grouped per c1).

        :return: dict with merged 'title_info' and 'title_info_c1group'
        :raises IndexError: when data_parse() produced no results
        """
        lists = []
        title_info_c1group = []
        # each c1_info is one c1 record (a dataclass instance)
        for c1_info in self._result_list:
            content_dict = asdict(c1_info)
            # content_dict['title_info'] holds all cn entries of this c1;
            # deepcopy keeps `lists` independent of content_dict
            temp_title_info = copy.deepcopy(content_dict['title_info'])
            title_info_c1group.append([cn_info["chapter_id"] for cn_info in temp_title_info])
            lists.extend(temp_title_info)
        # first record serves as the template for the merged result
        dicts = asdict(self._result_list[0])
        print(title_info_c1group)
        dicts["title_info"] = lists
        dicts['title_info_c1group'] = title_info_c1group
        return dicts
