import re
from typing import Union

import numpy as np
import pandas as pd

from .timestamp_process import *
from .tools import get_consecutive_ids


class ContentMerger:
    """Merge transcript rows into larger content chunks.

    Chunk boundaries are derived from silence ranges, speaker continuity
    (``spk_id``) and two configurable "edge word" lists that force a split
    in front of / behind matching rows.
    """

    def __init__(self, stopwords_path, front_edgewords_path, back_edgewords_path):
        """Load the word lists (UTF-8, one entry per line).

        Entries keep their trailing newline for backward compatibility; the
        newline is stripped at the point of use (see ``has_edge_word``).
        """
        with open(stopwords_path, 'r', encoding='utf-8') as f:
            self.stopwords = f.readlines()
        with open(front_edgewords_path, 'r', encoding='utf-8') as f:
            self.front_edge_words = f.readlines()
        with open(back_edgewords_path, 'r', encoding='utf-8') as f:
            self.back_edge_words = f.readlines()
        # Bug fix: filter_edge_lines reads self.edge_words, which was never
        # initialized before (AttributeError as soon as that filter is enabled).
        self.edge_words = self.front_edge_words + self.back_edge_words

    def __call__(self, transcript: pd.DataFrame,
                 silent_points: Union[list, np.ndarray],
                 backtracking_points: Union[list, np.ndarray],
                 max_step):
        """Run the full merge pipeline and return a flat list of chunk dicts.

        NOTE(review): ``backtracking_points`` is currently unused; the
        parameter is kept for interface compatibility.
        """
        silent_points = make_valid_ranges(silent_points)
        front_edge_points, back_edge_points = self.get_edge_words_range(transcript)
        merged_ranges = merge_ranges(silent_points, max_step, [], interval=3)
        transcript_info = self.merge_contents_with_spk(merged_ranges, transcript,
                                                       front_edge_points, back_edge_points)
        return transcript_info

    def get_edge_words_range(self, transcript: pd.DataFrame):
        """Return the unique 'start' times of rows that contain a front / back
        edge word, as a pair of sorted numpy arrays."""
        front_break_points = []
        back_break_points = []
        for index, row in transcript.iterrows():
            text = row['text']
            if not isinstance(text, str):  # skip NaN / non-text rows
                continue
            if self.has_edge_word(self.front_edge_words, text):
                front_break_points.append(transcript.loc[index, 'start'])
            elif self.has_edge_word(self.back_edge_words, text):
                back_break_points.append(transcript.loc[index, 'start'])

        return np.unique(front_break_points), np.unique(back_break_points)

    def merge_contents_with_spk(self, merged_ranges, transcript: pd.DataFrame, front_break_points, back_break_points):
        """Split each merged range by speaker / edge words, then flatten the
        per-range chunk lists and run the cleanup filters."""
        r2i = self.chunk_split_by_spk(merged_ranges, transcript)
        transcript_info = self.get_content_from_spk(transcript, r2i, front_break_points, back_break_points)
        flatten_out = []
        for range_, info in transcript_info.items():
            for i in info:
                i['start'] = i['range'][0]
                i['end'] = i['range'][1]
                flatten_out.append(i)

        flatten_out = self.filter_same(flatten_out)
        flatten_out = self.filter_short_noise(flatten_out, 10)
        # flatten_out = self.filter_edge_lines(flatten_out)
        return flatten_out

    def filter_same(self, flatten_out):
        """Drop degenerate (start == end) chunks whose start coincides with
        the next chunk's start. Mutates and returns ``flatten_out``."""
        pop_idx = []
        for i in range(1, len(flatten_out)):
            pre_start = flatten_out[i - 1]['start']
            pre_end = flatten_out[i - 1]['end']

            if pre_start == pre_end and pre_start == flatten_out[i]['start']:
                pop_idx.append(i - 1)

        # Pop from the back so earlier indices stay valid.
        for i in sorted(pop_idx, reverse=True):
            flatten_out.pop(i)

        return flatten_out

    def filter_short_noise(self, flatten_out, short_len=10):
        """Merge chunks shorter than ``short_len`` into the neighbour with the
        smaller time gap. First and last chunks are never merged away."""
        pop_idx = []
        for i in range(1, len(flatten_out) - 1):
            start = flatten_out[i]['start']
            end = flatten_out[i]['end']

            if end - start < short_len:
                if start - flatten_out[i - 1]['end'] > flatten_out[i + 1]['start'] - end:
                    # Closer to the following chunk: prepend content to it.
                    flatten_out[i + 1]['content'] = flatten_out[i]['content'] + flatten_out[i + 1]['content']
                    flatten_out[i + 1]['start'] = flatten_out[i]['start']
                else:
                    # Closer to the preceding chunk: append content to it.
                    flatten_out[i - 1]['content'] += flatten_out[i]['content']
                    flatten_out[i - 1]['end'] = flatten_out[i]['end']

                pop_idx.append(i)

        for i in sorted(pop_idx, reverse=True):
            flatten_out.pop(i)

        return flatten_out

    def filter_edge_lines(self, flatten_out):
        """Re-attach chunks whose content contains an edge word.

        If the edge-word chunk shares a speaker with the next chunk it is
        prepended to that chunk, otherwise it is appended to the chunk two
        positions back. (Currently disabled in ``merge_contents_with_spk``.)
        """
        pop_idx = []
        for i in range(2, len(flatten_out)):
            pre_text = flatten_out[i - 1]['content']
            if self.has_edge_word(self.edge_words, pre_text):
                if set(flatten_out[i - 1]['spk_id']).intersection(set(flatten_out[i]['spk_id'])):
                    flatten_out[i]['content'] = pre_text + flatten_out[i]['content']
                    flatten_out[i]['start'] = flatten_out[i - 1]['start']
                else:
                    flatten_out[i - 2]['content'] += pre_text
                    flatten_out[i - 2]['end'] = flatten_out[i - 1]['end']
                pop_idx.append(i - 1)

        for i in sorted(pop_idx, reverse=True):
            flatten_out.pop(i)
        return flatten_out

    def get_content_from_spk(self, transcript, consecutive_ids, front_break_points, back_break_points):
        """Build ``{range: [chunk info, ...]}`` by grouping rows that share a
        speaker and splitting the groups at edge-word rows."""
        spk_ids = transcript['spk_id'].tolist()

        # Loop-invariant: resolve break-point times to row labels once.
        front_row_ids = [transcript.index[transcript['start'] == point][0] for point in front_break_points]
        back_row_ids = [transcript.index[transcript['start'] == point][0] for point in back_break_points]

        results = {}
        for range_, row_ids in consecutive_ids.items():

            # Each bbox is the (first, last) position of a run of rows that
            # share a speaker.
            bboxes = []
            for i in range(len(row_ids)):
                bbox = self.get_same_value_ids(row_ids, i, spk_ids)
                if bbox is not None:
                    bboxes.append(bbox)

            bboxes = merge_intervals(bboxes, 1)
            cut_row_ids = []
            for bbox in bboxes:
                start_id = row_ids[bbox[0]][0]
                end_id = row_ids[bbox[1]][-1]
                cut_row_ids.append([start_id, end_id])
            cut_row_ids = split_intervals(cut_row_ids, front_row_ids, False)
            cut_row_ids = split_intervals(cut_row_ids, back_row_ids, True)
            cut_row_ids = sorted(cut_row_ids, key=lambda x: x[0])
            infos = []
            for row_id in cut_row_ids:
                start_id = row_id[0]
                end_id = row_id[-1]
                # fillna on the sliced copy: the original in-place fillna on a
                # slice raised SettingWithCopyWarning and risked mutating
                # `transcript`.
                info = transcript[start_id: end_id + 1].fillna("")
                spk_id_slice = spk_ids[start_id: end_id + 1]
                time_range = (info['start'].iloc[0], info['end'].iloc[-1])
                text = ''.join(info['text'].tolist())

                infos.append({'range': time_range, 'content': text, 'spk_id': spk_id_slice})

            results[range_] = infos

        return results

    @staticmethod
    def has_word(word, content):
        """Return True if ``word`` occurs in ``content``.

        NOTE(review): ``word`` is passed to ``re.search`` and therefore
        treated as a regex pattern; entries containing regex metacharacters
        will be interpreted as such.
        """
        return re.search(word, content) is not None

    def has_edge_word(self, edge_words, content):
        """Return True if any (newline-terminated) entry of ``edge_words``
        matches ``content``."""
        for word in edge_words:
            if self.has_word(word.strip('\n'), content):
                return True
        return False

    @staticmethod
    def chunk_split_by_spk(merged_ranges, transcript):
        """Map each merged time range to groups of consecutive rows sharing a
        speaker. Ranges matching no transcript rows map to ``[None]``."""
        spk_ids = transcript['spk_id'].tolist()
        start_rows = transcript['start'].tolist()
        end_rows = transcript['end'].tolist()
        time_ranges = list(zip(start_rows, end_rows))
        r2i = {}
        for merged_range in merged_ranges:
            start_id, end_id = find_most_inclusive_ranges(time_ranges, merged_range)
            if start_id is None or end_id is None:
                r2i[merged_range] = [None]
                continue
            merged_spk_ids = spk_ids[start_id:end_id + 1]
            consecutive_ids = get_consecutive_ids(merged_spk_ids, start_id)
            r2i[merged_range] = consecutive_ids

        return r2i

    @staticmethod
    def get_same_value_ids(row_indices, cur_id, spk_ids):
        """Return ``(cur_id, max_id)`` where ``max_id`` is the last index in
        ``row_indices`` whose first row shares the speaker of
        ``row_indices[cur_id]`` (not necessarily contiguous). Returns None if
        any entry of ``row_indices`` is None.
        """
        if None in row_indices:
            return None
        row_id1 = row_indices[cur_id][0]
        cur_spk_id = spk_ids[row_id1]
        max_id = cur_id
        for i in range(cur_id + 1, len(row_indices)):
            if spk_ids[row_indices[i][0]] == cur_spk_id:
                max_id = i

        return (cur_id, max_id)
