import json
import os
import random
import re
from http import HTTPStatus
from typing import Union

import jieba
# import openai
import pandas as pd
import numpy as np
from langchain.text_splitter import RecursiveCharacterTextSplitter

from .text_clustering import SinglePassCluster
from .content_merger_dev import ContentMerger
from .prompt_template import *
from .timestamp_process import *
from .llm import LargeLanguageModel

# SECURITY(review): a live-looking API key is hard-coded and committed to
# source control — rotate this credential and supply it via the runtime
# environment or a secrets manager instead of embedding it here.
os.environ['DASHSCOPE_API_KEY'] = 'sk-8ceef23dc3864b2f8d1a651a5dcfbda3'

import warnings
from functools import wraps


def experimental(func):
    """Mark *func* as experimental.

    Every call emits a ``UserWarning`` attributed to the caller's frame
    (``stacklevel=2``) before delegating to the wrapped function unchanged.
    """

    @wraps(func)
    def guarded(*args, **kwargs):
        warnings.warn(
            f"{func.__name__} is an experimental function and may change or be removed in future versions.",
            UserWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)

    return guarded


class CutAgent:
    def __init__(self, stopwords_path, front_edgewords_path, back_edgewords_path,
                 max_step=3600, model_name="/data/.cache/modelscope/hub/qwen/Qwen2-7B-Instruct", chunk_size=6000):
        """Assemble the cut-agent pipeline components.

        Args:
            stopwords_path: path to the stopword list used for token filtering.
            front_edgewords_path: path to leading boundary-word list.
            back_edgewords_path: path to trailing boundary-word list.
            max_step: maximum merge window passed to the content merger.
            model_name: local path/name of the LLM.
                NOTE(review): default hard-codes a machine-specific path —
                confirm it is valid in deployment.
            chunk_size: character budget per LLM prompt chunk.
        """
        self.stopwords_path = stopwords_path
        self.front_edgewords_path = front_edgewords_path
        self.back_edgewords_path = back_edgewords_path
        self.max_step = max_step
        self.chunk_size = chunk_size
        # LLM used for every classification / extraction prompt below.
        self.llm_model = LargeLanguageModel(model_name)
        # Splitter keeps each chunk within the LLM's usable context window.
        self.r_splitter = RecursiveCharacterTextSplitter(chunk_size=self.chunk_size, chunk_overlap=0)
        self.content_merger = ContentMerger(self.stopwords_path, self.front_edgewords_path, self.back_edgewords_path)

    def __call__(self, transcript: pd.DataFrame, silent_points, backtracking_points):
        """Run the full cut pipeline over a transcript.

        Args:
            transcript: DataFrame with 'start', 'end' and 'text' columns.
            silent_points: silence markers forwarded to the content merger.
            backtracking_points: known [start, end] intervals used both to
                seed edge words and to subtract already-covered time.

        Returns:
            DataFrame of candidate ad segments with timing/entity metadata.
        """
        # Seed the merger's edge-word lists with text around the known
        # backtracking points so merges recognise those boundaries.
        self.update_edge_words(backtracking_points, transcript, self.content_merger.front_edge_words,
                               self.content_merger.back_edge_words)

        merged_info = self.content_merger(transcript, silent_points,
                                          backtracking_points, self.max_step)
        merged_ranges = self.get_merged_range(merged_info)
        # Time not already covered by merged / backtracking intervals.
        extra_ranges = self.get_extra_intervals(merged_ranges, backtracking_points)
        extra_lst = self.get_extra_transcript(extra_ranges, transcript)
        # Classify each leftover segment (entity presence), then locate
        # precise ad boundaries back in the transcript.
        extra_lst = self.cls_adv(extra_lst)
        extra_lst = self.find_adv_edge(extra_lst, transcript)
        return pd.DataFrame(extra_lst)

    def generate_content_relation(self, merged_info):
        """Ask the LLM whether each pair of consecutive segments is related.

        Stores the model verdict on segment i-1 under key 'concat'; the last
        segment always gets '不存在' (no successor to relate to). Invalid LLM
        responses are coerced to '不存在'.

        Args:
            merged_info: list of dicts with a 'content' field; mutated in
                place.

        Returns:
            The same list, each element annotated with 'concat'.
        """
        # BUGFIX: empty input previously raised IndexError on merged_info[-1].
        if not merged_info:
            return merged_info
        for i in range(1, len(merged_info)):
            # Compare only the tail of the previous segment against the head
            # of the next one to keep the prompt short.
            prev_tail = merged_info[i - 1]['content'][-500:]
            next_head = merged_info[i]['content'][:500]
            prompt_content = get_relation_prompt(prev_tail, next_head)
            out = self.call_with_messages(prompt_content, self.llm_model, max_token=3)
            if 'invalid' in out:
                out = '不存在'
            merged_info[i - 1]['concat'] = out
        merged_info[-1]['concat'] = '不存在'

        return merged_info

    def cls_adv(self, merged_info):
        """Tag each segment with its extracted entities.

        Runs LLM entity extraction on (the first chunk_size chars of) each
        segment's content and sets 'llm_output' to 1 when any entity was
        found, 0 otherwise. Segments whose entity extraction fails or whose
        content is NaN are marked 0.

        Args:
            merged_info: list of dicts with a 'content' field; mutated in
                place ('entity' and 'llm_output' keys added).

        Returns:
            The same list, annotated.
        """

        def flatten_entity_info(entity_info: list) -> str:
            # Join entity names with ';' (trailing separator preserved for
            # backward compatibility with downstream string checks).
            return ''.join(_info['entity'] + ';' for _info in entity_info)

        for info in merged_info:
            # NaN content (e.g. read back from CSV) arrives as float — skip.
            if isinstance(info['content'], float):
                info['llm_output'] = 0
                continue
            entity_info = self.entity_parse(info['content'][:self.chunk_size])
            if entity_info is None:
                info['llm_output'] = 0
                continue
            info["entity"] = flatten_entity_info(entity_info)
            info['llm_output'] = 1 if info['entity'] else 0

        return merged_info

    def confirm_adv(self, merged_info):
        """Second-pass confirmation of segments already flagged as ads.

        Segments with llm_output == 1 get a 'confirmed' flag (1/0) from the
        news-noise filter; other segments are left untouched.
        """
        for record in merged_info:
            if record['llm_output'] != 1:
                continue
            verdict = self.filter_news_noise(record['content'][:self.chunk_size])
            record['confirmed'] = 1 if verdict > 0 else 0
        return merged_info

    def entity_parse(self, content, max_output=3000):
        """Extract entities from *content* via the LLM.

        Skips texts with fewer than 8 meaningful (non-stopword) tokens.
        Returns the parsed JSON list from the model output, or None when
        nothing usable was produced.
        """
        tokens = jieba.lcut(content)
        meaningful = [tok for tok in tokens if tok not in self.content_merger.stopwords]
        if len(meaningful) < 8:
            return None
        raw = self.call_with_messages(get_all_entity_prompt(content), self.llm_model, max_token=max_output)
        # Pull the first bracketed fragment out of the (possibly chatty)
        # model response before attempting to parse it.
        found = re.search(r'\[(.*?)\]', raw, re.DOTALL)
        if found is None:
            print("未找到匹配的 JSON 部分")
            return None
        try:
            return json.loads(found.group(0))
        except json.JSONDecodeError as e:
            print(f"JSON 解析错误: {e}")
            return None

    def concat_content(self, merged_info):
        """Merge adjacent related segments that overlap in time.

        A segment is folded into its successor when the relation check
        ('concat' key, set by generate_content_relation) did not answer
        '不存在' and the successor starts no later than the segment ends.

        Args:
            merged_info: list of dicts with 'start', 'end', 'content' and
                'concat' keys; mutated in place.

        Returns:
            The list with merged predecessors removed.
        """
        pop_idx = []
        for i in range(1, len(merged_info)):
            prev, curr = merged_info[i - 1], merged_info[i]
            # '不存在' means the LLM found no relation — keep both segments.
            if '不存在' in prev['concat']:
                continue
            if curr['start'] <= prev['end']:
                # Related and overlapping: absorb the previous segment into
                # the current one, then drop the original afterwards.
                curr['content'] = prev['content'] + curr['content']
                curr['start'] = prev['start']
                pop_idx.append(i - 1)

        # Pop from the back so earlier indices stay valid.
        for i in sorted(pop_idx, reverse=True):
            merged_info.pop(i)
        return merged_info

    def process_vaild_content(self, df, stopwords):
        """Classify each row's content via a multi-step LLM prompt chain.

        Sets ``row['llm_output']`` to 1 (ad), 0 (unknown / not ad) or 2
        (definitely non-ad type, stops chunk loop early) on every row.

        NOTE(review): "vaild" is a typo for "valid" — name kept for caller
        compatibility.

        Args:
            df: iterable of dict-like rows with a 'content' field; rows are
                mutated in place.
            stopwords: tokens ignored when counting meaningful words.

        Returns:
            The same ``df`` object, mutated.
        """
        # outputs = []
        for row in df:
            text = row['content']
            words = jieba.lcut(text)  # tokenize with jieba
            cuts = [word for word in words if word not in stopwords]
            if len(cuts) < 5:
                # Too little meaningful text to classify.
                out = 0
            elif 'cuda memory' in text:
                # NOTE(review): 'cuda memory' looks like an upstream error
                # sentinel; why it maps to 1 (ad) is unclear — confirm intent.
                out = 1
            else:
                # Long texts are split so each prompt fits the chunk budget.
                if len(text) > 2000:
                    text_chunks = self.r_splitter.split_text(text)
                else:
                    text_chunks = [text]
                out = 0
                for text in text_chunks:
                    # Step 1: coarse ad classification; invalid → '未知'.
                    prompt = get_adv_cls_prompt(text)
                    response = self.call_with_messages(prompt, self.llm_model, max_token=6)
                    if 'invalid' in response:
                        response = '未知'

                    if '未知' not in response:
                        # Step 2: ask for the content type.
                        prompt = get_type_prompt(text)
                        response = self.call_with_messages(prompt, self.llm_model, max_token=6)
                        if '未知' in response:
                            out = 0
                        elif '广告' not in response:
                            # Confidently not an ad: flag 2 and stop early.
                            out = 2
                            break
                        # Step 3: reflection prompt to confirm the verdict.
                        prompt = get_reflection_prompt(text)
                        # prompt = get_type_prompt(text)
                        response = self.call_with_messages(prompt, self.llm_model, max_token=6)
                        if 'invalid' in response:
                            response = '未知'

                    if '广告' in response:
                        out = 1 or out  # always 1 once any chunk says ad
                    else:
                        out = 0 or out  # keeps an earlier positive result
            row['llm_output'] = out

        # df['llm_output'] = outputs

        return df

    @staticmethod
    def call_with_messages(content, model, max_token=6):
        """Invoke *model* with *content*, shielding callers from LLM errors.

        Args:
            content: prompt string passed to the model.
            model: callable taking (content, max_token) and returning text.
            max_token: generation budget forwarded to the model.

        Returns:
            The model output, or the sentinel string
            'invalid input or error occurs in llm!' on failure — callers
            detect it via ``'invalid' in out``.
        """
        try:
            output_content = model(content, max_token)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any model failure maps to the sentinel.
            output_content = 'invalid input or error occurs in llm!'

        return output_content

    @staticmethod
    def get_merged_range(merged_info):
        """Collect each segment's [start, end] pair into an (N, 2) array."""
        pairs = [[segment['start'], segment['end']] for segment in merged_info]
        return np.array(pairs)

    @staticmethod
    def update_edge_words(backtracking_points, transcript: pd.DataFrame, front_edgewords, back_edgewords):
        """Append boundary snippets around each backtracking point.

        For every point, the transcript rows it covers are joined and the
        first/last 10 characters are appended to the front/back edge-word
        lists (mutated in place).
        """
        starts = transcript['start'].tolist()
        ends = transcript['end'].tolist()
        texts = transcript['text'].tolist()
        spans = list(zip(starts, ends))

        for point in backtracking_points:
            first_id, last_id = find_most_inclusive_ranges(spans, point)
            if first_id is None or last_id is None:
                continue
            content = ''.join(texts[first_id:min(len(ends), last_id + 1)])
            front_edgewords.append(content[:10])
            back_edgewords.append(content[-10:])

    @staticmethod
    def get_extra_transcript(extra_ranges, transcript: pd.DataFrame):
        """Materialise each extra time range as a text segment.

        Args:
            extra_ranges: [start, end] pairs to extract.
            transcript: DataFrame with 'start', 'end' and 'text' columns.

        Returns:
            List of dicts with 'start', 'end' and joined 'content'. Ranges
            that cannot be matched to transcript rows are skipped.
        """
        start_rows = transcript['start'].tolist()
        end_rows = transcript['end'].tolist()
        texts = transcript['text'].tolist()
        time_ranges = list(zip(start_rows, end_rows))

        extra_lst = []
        for range_ in extra_ranges:
            start_id, end_id = find_most_inclusive_ranges(time_ranges, range_)
            if start_id is None or end_id is None:
                continue
            start_range = time_ranges[start_id]
            # NOTE(review): the end time is read one row past end_id
            # (clamped), while the text slice stops at end_id + 1 — this
            # asymmetry is preserved from the original logic; confirm it is
            # intentional.
            end_range = time_ranges[min(len(end_rows) - 1, end_id + 1)]
            content = ''.join(texts[start_id:min(len(end_rows), end_id + 1)])
            extra_lst.append({'start': start_range[0],
                              'end': end_range[1],
                              'content': content})
        return extra_lst

    @staticmethod
    def get_extra_intervals(ranges1: Union[np.ndarray, list], ranges2: Union[np.ndarray, list]):
        """Return the parts of *ranges1* not covered by *ranges2*.

        *ranges2* is first merged into disjoint intervals. Ranges in
        *ranges1* fully contained in a covering interval are dropped;
        partially overlapped ranges are replaced by their uncovered gaps;
        untouched ranges pass through unchanged.

        Args:
            ranges1: candidate [start, end] pairs (list or ndarray).
            ranges2: covering [start, end] pairs.

        Returns:
            List of [start, end] pairs from ranges1 minus the coverage.
        """
        ranges2 = merge_overlapping_intervals(ranges2)
        if isinstance(ranges1, np.ndarray):
            ranges1 = ranges1.tolist()

        # Pass 1: drop ranges fully contained in a covering interval.
        pop_idx = []
        for i, r1 in enumerate(ranges1):
            for r2 in ranges2:
                if r2[0] <= r1[0] and r1[1] <= r2[1]:
                    pop_idx.append(i)
                    # BUGFIX: stop after the first containing interval — a
                    # boundary range touching two merged intervals appended
                    # the same index twice, making the reverse-sorted pop
                    # below remove an unrelated element.
                    break

        for i in sorted(pop_idx, reverse=True):
            ranges1.pop(i)

        # Pass 2: map each surviving range to the intervals it overlaps.
        cluster = {}
        for i, r1 in enumerate(ranges1):
            cluster[i] = [j for j, r2 in enumerate(ranges2)
                          if max(r1[0], r2[0]) < min(r1[1], r2[1])]

        pop_idx.clear()
        extra_ranges = []
        for k, v in cluster.items():
            if not v:
                continue
            # Replace a partially covered range by its uncovered gaps.
            sub_ranges = [ranges2[j] for j in v]
            gaps = find_uncovered_intervals(ranges1[k], sub_ranges)
            extra_ranges.extend(gaps)
            pop_idx.append(k)

        for i in sorted(pop_idx, reverse=True):
            ranges1.pop(i)

        ranges1 += extra_ranges
        return ranges1

    def find_adv_edge(self, merged_info, transcript):
        """Locate precise ad boundaries inside LLM-flagged segments.

        For each segment with ``llm_output >= 1`` the content is split into
        chunks; each chunk is tested for being about a single entity, and
        otherwise the LLM is asked for start/end sentences of embedded ads,
        which are matched back to transcript rows to recover timestamps.

        Args:
            merged_info: list of dicts with 'content' and 'llm_output' keys.
            transcript: DataFrame with 'start', 'end' and 'text' columns.

        Returns:
            A new list of confirmed ad segments annotated with
            'llm_output', 'single_entity' and 'entity_name'.
        """

        def get_transcript_row_id(transcript, start_txt, end_txt, start_cache):
            # Locate the first row matching start_txt (skipping rows before
            # the last matched start time in start_cache), then the first
            # later row matching end_txt. "Matching" is substring containment
            # in whichever direction the shorter string fits.
            start_id = None
            end_id = None
            found_start = False

            for index, row in transcript.iterrows():
                if start_cache and row['start'] < start_cache[-1]:
                    continue
                if ((len(row['text']) < len(start_txt) and row['text'] in start_txt) or
                        (len(row['text']) >= len(start_txt) and start_txt in row['text'])):
                    start_id = index
                    found_start = True
                elif found_start and ((len(row['text']) < len(end_txt) and row['text'] in end_txt) or
                                      (len(row['text']) >= len(end_txt) and end_txt in row[
                                          'text'])) and index >= start_id:
                    end_id = index
                    break

            return start_id, end_id

        def format_out(row, single_flag, llm_flag, entity_name):
            # Attach classification metadata to a segment dict in place.
            row['llm_output'] = llm_flag
            row['single_entity'] = single_flag
            row['entity_name'] = entity_name

        new_merged_info = []
        start_cache = []
        for i in range(len(merged_info)):
            if merged_info[i]["llm_output"] < 1:
                continue
            content = merged_info[i]['content']
            words = jieba.lcut(content)  # tokenize with jieba
            cuts = [word for word in words if word not in self.content_merger.stopwords]
            if len(cuts) < 8:
                # BUGFIX: this was `return`, which aborted the whole scan and
                # discarded every segment collected so far; skip just this
                # segment instead.
                continue
            elif 'cuda memory' in content:
                continue
            else:
                text_chunks = self.r_splitter.split_text(content)
                for sub_text in text_chunks:
                    is_single_entity = self.single_entity_cls(sub_text, max_output=200)
                    if is_single_entity is not None:
                        single_flag = is_single_entity[0]
                        entity_name = is_single_entity[1]
                        if single_flag == 1:
                            if len(content) > 300:
                                # Long single-entity content must mention the
                                # entity at least twice to count as an ad.
                                if '-' in entity_name:
                                    entity_name = entity_name.split('-')[0]
                                if content.count(entity_name) < 2:
                                    continue
                            format_out(merged_info[i], single_flag, 1, entity_name)
                            new_merged_info.append(merged_info[i])
                            continue

                    # Not a single-entity chunk: ask the LLM for ad edge
                    # sentences and map them back to transcript rows.
                    edge_info = self.edge_info_parse(sub_text, max_output=3000)
                    if edge_info is not None:
                        for info in edge_info:
                            start_id, end_id = get_transcript_row_id(transcript, info['start'], info['end'],
                                                                     start_cache)
                            if start_id is None or end_id is None:
                                continue
                            start = transcript.iloc[start_id]['start']
                            end = transcript.iloc[end_id]['end']
                            text = ''.join(transcript.iloc[start_id:end_id + 1]['text'])
                            start_cache.append(start)
                            is_single_entity = self.single_entity_cls(text, max_output=200)
                            if is_single_entity is not None:
                                single_flag = is_single_entity[0]
                                entity_name = is_single_entity[1]
                                if single_flag == 1:
                                    if '-' in entity_name:
                                        entity_name = ''.join(entity_name.split('-'))
                                    tmp_info = {'start': start,
                                                'end': end,
                                                'content': text}
                                    format_out(tmp_info, single_flag, 1, entity_name)
                                    new_merged_info.append(tmp_info)

        return new_merged_info
    def public_entity_cls(self, content, max_output=200):
        """Ask the LLM whether *content* concerns a public entity.

        Returns:
            The tuple parsed from the model's "(...)" answer, or None when
            no tuple is found or parsing fails.
        """
        out = self.call_with_messages(get_public_prompt(content), self.llm_model, max_token=max_output)
        tuple_pattern = r'\((.*?)\)'
        match = re.search(tuple_pattern, out, re.DOTALL)
        if match:
            tuple_str = match.group(0)
            try:
                import ast
                is_single = ast.literal_eval(tuple_str)
                return is_single
            except (ValueError, SyntaxError):
                # Narrowed from a bare `except:`; literal_eval raises these
                # on malformed tuple text.
                print("无法解析字符串为元组")
        else:
            print("未找到匹配的文本")

    def single_entity_cls(self, content, max_output=200):
        """Ask the LLM whether *content* is about a single entity.

        Returns:
            The tuple parsed from the model's "(...)" answer — by caller
            usage, (flag, entity_name) — or None when no tuple is found or
            parsing fails.
        """
        out = self.call_with_messages(get_entity_prompt(content), self.llm_model, max_token=max_output)
        tuple_pattern = r'\((.*?)\)'
        match = re.search(tuple_pattern, out, re.DOTALL)
        if match:
            tuple_str = match.group(0)
            try:
                import ast
                is_single = ast.literal_eval(tuple_str)
                return is_single
            except (ValueError, SyntaxError):
                # Narrowed from a bare `except:`; literal_eval raises these
                # on malformed tuple text.
                print("无法解析字符串为元组")
        else:
            print("未找到匹配的文本")

    def edge_info_parse(self, content, max_output=3000):
        """Ask the LLM for ad start/end sentences and parse the JSON list.

        Returns the parsed list of edge dicts, or None when the response
        contains no bracketed JSON or it fails to parse.
        """
        raw = self.call_with_messages(get_egde_prompt(content), self.llm_model, max_token=max_output)
        found = re.search(r'\[(.*?)\]', raw, re.DOTALL)
        if found is None:
            print("未找到匹配的 JSON 部分")
            return None
        try:
            # Parse the extracted JSON fragment into Python objects.
            return json.loads(found.group(0))
        except json.JSONDecodeError as e:
            print(f"JSON 解析错误: {e}")
            return None

    @experimental
    def filter_news_noise(self, content, max_output=3000):
        """Confirm whether flagged content really contains an ad.

        Returns 1 when the LLM confirms ad content; 0 when the model answers
        '不存在' or the call failed ('invalid' sentinel).
        """
        verdict = self.call_with_messages(adv_confirmed_prompt(content), self.llm_model, max_token=max_output)
        if "不存在" in verdict or 'invalid' in verdict:
            return 0
        return 1
