import os
from typing import Union

import numpy as np
import pandas as pd

from job.auto_cut.modules.content_merger_dev import ContentMerger

def _flush_group(group, result):
    """Emit a finished group of utterances into *result*.

    A group of exactly two entries is emitted as two separate items (each
    with its own time range and speaker set); any other group size is merged
    into a single item spanning the whole group.
    """
    if len(group) == 2:
        # Pairs are kept separate rather than merged.
        for item in group:
            result.append({
                'start': item['start'],
                'end': item['end'],
                'content': item['content'],
                'spk_id': set(item['spk_id']),
            })
    else:
        # Merge the group into one entry covering the full time span.
        result.append({
            'start': group[0]['start'],
            'end': group[-1]['end'],
            'content': ' '.join(item['content'] for item in group),
            'spk_id': {spk for item in group for spk in item['spk_id']},
        })

def cross_speaker_merge(merge_info):
    """Group consecutive utterances that share at least one speaker ID.

    Each element of *merge_info* is a dict with keys 'start', 'end',
    'content' and 'spk_id' (an iterable of speaker IDs). Consecutive
    elements are accumulated while their speaker sets intersect the
    accumulated group's speaker set; when the chain breaks, the group is
    flushed (pairs stay separate, larger groups are merged — see
    _flush_group).

    :param merge_info: list of utterance dicts, in temporal order
    :return: list of dicts with 'start', 'end', 'content', 'spk_id' (a set)

    Bug fixes vs. the original:
    - the loop ran over ``range(len(merge_info) - 1)`` and silently dropped
      the final utterance; it now processes every element;
    - a two-element group flushed mid-loop emitted only its first element
      and lost the second; both are now emitted, matching the end-of-input
      handling.
    """
    result = []
    temp_list = []

    for info in merge_info:
        if len(temp_list) < 2:
            # Seed the group: the first two elements are accepted unconditionally.
            temp_list.append(info)
            continue

        current_spk_ids = set(info['spk_id'])
        temp_spk_ids = {spk for item in temp_list for spk in item['spk_id']}

        if current_spk_ids & temp_spk_ids:
            # Shares a speaker with the running group: keep accumulating.
            temp_list.append(info)
        else:
            # Speaker chain broken: flush the group and start a new one.
            _flush_group(temp_list, result)
            temp_list = [info]

    # Flush whatever remains at end of input.
    if temp_list:
        _flush_group(temp_list, result)

    return result

def confirm_adv(merged_info):
    """Print the content of every entry flagged as an ad (adv_type == 1).

    :param merged_info: iterable of dicts with 'adv_type' and 'content' keys
    :return: *merged_info*, unchanged
    """
    ads = (entry for entry in merged_info if entry['adv_type'] == 1)
    for ad in ads:
        print(ad['content'])
    return merged_info

def get_merged_range(merged_info):
    """Collect every entry's [start, end] pair into an (n, 2) numpy array.

    :param merged_info: iterable of dicts with 'start' and 'end' keys
    :return: np.ndarray of shape (n, 2)
    """
    return np.array([[info['start'], info['end']] for info in merged_info])

def find_most_inclusive_ranges(ranges, target_time):
    """Return the first and last indices of ranges overlapping *target_time*.

    NOTE: despite the name, a range qualifies as soon as its interior
    strictly overlaps the target interval — full containment is NOT
    required (the stricter containment test is present in the original as
    commented-out code).

    :param ranges: sequence of (start, end) pairs
    :param target_time: (start, end) pair of interest
    :return: (first_index, last_index) of the overlapping ranges, or
        (None, None) when no range overlaps
    """
    t_start, t_end = target_time

    # Strict-overlap test: the intersection must have positive length.
    hits = [
        i for i, (r_start, r_end) in enumerate(ranges)
        if max(r_start, t_start) < min(r_end, t_end)
    ]

    if not hits:
        return None, None
    return hits[0], hits[-1]
def get_extra_transcript(extra_ranges, transcript: pd.DataFrame):
    """Extract transcript snippets overlapping each requested time range.

    For every range in *extra_ranges*, finds the transcript rows whose
    (start, end) overlap it and returns one dict per range with the joined
    text.

    :param extra_ranges: iterable of (start, end) pairs
    :param transcript: DataFrame with 'start', 'end' and 'text' columns
    :return: list of {'start', 'end', 'content'} dicts (ranges with no
        overlapping rows are skipped)
    """
    starts = transcript['start'].tolist()
    ends = transcript['end'].tolist()
    texts = transcript['text'].tolist()
    segments = list(zip(starts, ends))
    last = len(ends) - 1

    extras = []
    for rng in extra_ranges:
        first_id, last_id = find_most_inclusive_ranges(segments, rng)
        if first_id is None or last_id is None:
            continue
        # NOTE(review): the reported end time comes from the segment *after*
        # last_id, while the joined text stops at last_id — confirm this
        # asymmetry is intentional.
        end_segment = segments[min(last, last_id + 1)]
        content = ''.join(texts[first_id:min(last + 1, last_id + 1)])
        extras.append({'start': segments[first_id][0],
                       'end': end_segment[1],
                       'content': content})
    return extras

def merge_overlapping_intervals(intervals):
    """Merge overlapping (and touching) [start, end] intervals.

    Accepts a list of pairs or an (n, 2) numpy array. Intervals that merely
    touch (previous end == next start) are merged as well.

    :param intervals: list/array of [start, end] pairs
    :return: sorted list of merged [start, end] lists; [] for empty input

    Fix vs. the original: the input list was sorted in place, a
    caller-visible mutation of the argument. We now sort a copy, leaving
    the caller's list untouched.
    """
    if isinstance(intervals, np.ndarray):
        intervals = intervals.tolist()
    # Empty input — or a first element that is itself empty — yields nothing.
    if not intervals or not intervals[0]:
        return []

    # Sort a copy by start so the caller's list is not reordered.
    ordered = sorted(intervals, key=lambda iv: iv[0])

    merged = [list(ordered[0])]
    for start, end in ordered[1:]:
        if merged[-1][1] < start:
            # Disjoint from the current run: start a new merged interval.
            merged.append([start, end])
        else:
            # Overlapping or touching: extend the current run.
            merged[-1][1] = max(merged[-1][1], end)
    return merged

def find_uncovered_intervals(total_range, sub_ranges):
    """Return the parts of *total_range* not covered by any of *sub_ranges*.

    :param total_range: (start, end) pair delimiting the region of interest
    :param sub_ranges: iterable of (start, end) pairs; may overlap, any order
    :return: list of (start, end) gaps inside total_range, ascending

    Bug fix: the original unconditionally set the covered frontier to the
    current sub-range's end, so a sub-range nested inside already-covered
    territory moved the frontier *backwards* and produced bogus gaps
    (e.g. total (0, 10) with subs [(0, 8), (1, 2)] reported (2, 10) instead
    of (8, 10)). The frontier now only ever advances.
    """
    uncovered_intervals = []
    # Rightmost position covered so far; starts at the region's left edge.
    max_covered_end = total_range[0]

    for start, end in sorted(sub_ranges, key=lambda r: r[0]):
        if start >= total_range[1]:
            # This (and every later) sub-range lies beyond the region.
            break
        if start > max_covered_end:
            # Gap between the covered frontier and this sub-range.
            uncovered_intervals.append((max_covered_end, start))
        # Advance the frontier — never move it backwards.
        max_covered_end = max(max_covered_end, end)
        if max_covered_end >= total_range[1]:
            # Region fully covered from here on.
            return uncovered_intervals

    # Trailing gap after the last sub-range, if any.
    if max_covered_end < total_range[1]:
        uncovered_intervals.append((max_covered_end, total_range[1]))

    return uncovered_intervals
def get_extra_intervals(ranges1: Union[np.ndarray, list], ranges2: Union[np.ndarray, list]):
    """Subtract the coverage of *ranges2* from the intervals in *ranges1*.

    - ranges fully contained in (merged) ranges2 are dropped;
    - ranges partially overlapped are replaced by their uncovered gaps;
    - ranges untouched by ranges2 are kept as-is.

    :param ranges1: list or (n, 2) array of [start, end] pairs to filter
    :param ranges2: list or (n, 2) array of covering [start, end] pairs
    :return: list of surviving intervals: untouched ranges first (in input
        order), then the uncovered gaps of partially-covered ranges

    Fix vs. the original: *ranges1* was mutated in place (elements popped,
    extras appended), a surprising side effect for callers; the computation
    is now purely functional with the same return value and ordering.
    """
    covering = merge_overlapping_intervals(ranges2)
    if isinstance(ranges1, np.ndarray):
        ranges1 = ranges1.tolist()

    # Drop every range fully contained in one covering interval.
    remaining = [
        r1 for r1 in ranges1
        if not any(c[0] <= r1[0] and r1[1] <= c[1] for c in covering)
    ]

    untouched = []
    extra_ranges = []
    for r1 in remaining:
        # Covering intervals that strictly overlap this range.
        overlaps = [c for c in covering if max(r1[0], c[0]) < min(r1[1], c[1])]
        if overlaps:
            # Partially covered: keep only the uncovered gaps.
            extra_ranges.extend(find_uncovered_intervals(r1, overlaps))
        else:
            untouched.append(r1)

    return untouched + extra_ranges

# --- Ad-hoc driver -----------------------------------------------------------
# Runs ContentMerger over a single channel's transcript CSV and writes the
# result to interval.csv. Executes at import time with hard-coded Windows
# paths. NOTE(review): consider guarding with `if __name__ == "__main__":`.
content_merger = ContentMerger("..\\auto_cut\\data\\stopwords.txt", "..\\auto_cut\\data\\front_edgewords", "..\\auto_cut\\data\\back_edgewords")
dir_path = "E:\\projects\\江苏数据"
for channel in os.listdir(dir_path):
    # Only channel "201" is processed in this debug run.
    if channel != "201":
        continue
    cut_csv = os.path.join(dir_path, channel, "00-00-00-1-cut.csv")  # currently unused
    out_csv = os.path.join(dir_path, channel, "tmp.csv")  # currently unused
    interval_csv = os.path.join(dir_path, channel, "interval.csv")
    transcript_csv = os.path.join(dir_path, channel, "00-00-00-1-transcript.csv")
    transcript = pd.read_csv(transcript_csv)
    # 1800 is presumably a duration/window limit in seconds — TODO confirm
    # against ContentMerger.__call__'s signature.
    out = content_merger(transcript, [], [], 1800)
    # merger_info = pd.DataFrame(merger_info)
    # merger_info.to_csv(out_csv)

    # Alternative pipeline kept for reference:
    # out = cross_speaker_merge(merger_info)
    # merged_ranges = get_merged_range(out)
    # extra_ranges = get_extra_intervals(merged_ranges, [])
    # out = get_extra_transcript(extra_ranges, transcript)
    print(type(out))
    out = pd.DataFrame(out)
    # Persist the merged intervals for this channel, then stop after one channel.
    out.to_csv(interval_csv, index=False)
    break
