import ast
import json
import os
import re
import time

import pandas as pd
from openai import OpenAI


def get_relation_prompt(text1, text2):
    """Build the prompt asking the LLM whether snippets A and B share
    similar content or are continuous.

    The model is instructed to answer only 存在 ("exists") or 不存在
    ("does not exist"), with no extra explanation.
    """
    template = """
    请判断A与B是否存在相似内容或连续性
    ------
    数据:
    - A: <{a}>
    - B: <{b}>
    ------
    要求: 
    - 如果AB存在相似内容或连续性,输出存在,否则输出不存在
    - 不需要做额外解释
    """
    return template.format(a=text1, b=text2)


def concat_content(merged_info):
    """Merge adjacent segments that the LLM judged as related.

    For each segment whose predecessor was marked as related (its
    '32b_concat' verdict does not contain 不存在) and whose time range
    overlaps the predecessor's, fold the predecessor's content into the
    current segment and drop the predecessor afterwards.

    Mutates and returns ``merged_info``.
    """
    dropped = []
    for idx in range(1, len(merged_info)):
        previous = merged_info[idx - 1]
        verdict = previous['32b_concat']
        # A float verdict is a pandas NaN: no LLM judgement available.
        if isinstance(verdict, float):
            continue
        if '不存在' in verdict:
            continue
        current = merged_info[idx]
        if current['start'] <= previous['end']:
            current['content'] = previous['content'] + current['content']
            current['start'] = previous['start']
            current['llm_output'] = previous['llm_output'] or current['llm_output']
            dropped.append(idx - 1)

    # Remove merged-away predecessors from the back so indices stay valid.
    for idx in reversed(dropped):
        merged_info.pop(idx)
    return merged_info


def generate_content_relation(merged_info):
    """Ask the LLM, for each pair of consecutive segments, whether they are
    related, and store the verdict on the earlier segment.

    Writes the model's answer into ``merged_info[i-1]['32b_concat']``;
    the last segment (no successor) always gets 不存在. An 'invalid'
    reply from the API wrapper is normalized to 不存在.

    Mutates and returns ``merged_info``.
    """
    # Bug fix: the original indexed merged_info[-1] unconditionally and
    # raised IndexError on an empty list.
    if not merged_info:
        return merged_info
    for i in range(1, len(merged_info)):
        text1 = merged_info[i - 1]['content']
        text2 = merged_info[i]['content']
        # Float contents are pandas NaN (missing transcript text); skip them.
        if isinstance(text1, float) or isinstance(text2, float):
            continue
        # Only compare the adjacent edges (tail of A, head of B) to keep
        # the prompt short.
        prompt_content = get_relation_prompt(text1[-500:], text2[:500])
        out = call_with_messages(prompt_content, max_token=3)
        print(out)
        time.sleep(2)  # crude rate limiting between API calls
        if 'invalid' in out:
            out = '不存在'
        merged_info[i - 1]['32b_concat'] = out
    merged_info[-1]['32b_concat'] = '不存在'

    return merged_info


def get_type_prompt(content):
    """Build the prompt asking the LLM to classify the transcript text as
    one of: 新闻 (news), 影视剧 (drama), 综艺 (variety show), 歌曲 (song),
    广告 (advertisement), or 未知 (unknown).
    """
    template = """
    你是一个内容标注员,结合上下文提取给定文本的类型.
    ------
    规则如下:
    - 如果认为该文本存在新闻,请返回[新闻]
    - 如果认为该文本存在影视剧,请返回[影视剧]
    - 如果认为该文本存在综艺,请返回[综艺]
    - 如果认为该文本存在歌曲,请返回[歌曲]
    - 如果认为该文本存在广告,请返回[广告]
    - 如果不确定该文本类型,请返回[未知]
    ---------
    转录文本：[{content}]
    """
    return template.format(content=content)


def find_adv_edge(merged_info, transcript):
    """Run LLM entity extraction on every merged segment and attach the
    raw result under 'middle_res'.

    ``transcript`` is kept for interface compatibility but is currently
    unused: the original edge-alignment pipeline (mapping detected ad
    boundaries back to transcript rows and classifying single/public
    entities) was disabled behind an early return, leaving ~100 lines of
    dead commented-out code and unused inner helpers, which have been
    removed here.

    Mutates and returns ``merged_info``.
    """
    for segment in merged_info:
        # entity_parse returns the parsed JSON list of detected ads,
        # or None when the model reply could not be parsed.
        segment['middle_res'] = entity_parse(segment['content'], max_output=3000)
    return merged_info


def single_entity_cls(content, max_output=200):
    """Ask the LLM whether *content* advertises a single product.

    Returns the tuple parsed from the model reply — e.g.
    ``(1, '厂家名称-产品名称')``, ``(2, '未知')`` or ``(0, '不存在')`` —
    or None when the reply contains no parsable tuple.
    """
    out = call_with_messages(get_entity_prompt(content), max_token=max_output)
    # Grab the first parenthesized group, e.g. "(1, '厂家-产品')".
    match = re.search(r'\((.*?)\)', out, re.DOTALL)
    if match is None:
        print("未找到匹配的文本")
        return None
    try:
        # literal_eval safely parses the tuple without executing code.
        return ast.literal_eval(match.group(0))
    except (ValueError, SyntaxError):
        # Bug fix: the original bare `except:` swallowed every exception,
        # including KeyboardInterrupt; catch only parse failures.
        print("无法解析字符串为元组")
        return None

def find_edge_from_entity(content, entity, max_output=200):
    """Ask the LLM for the ad passages tied to *entity* inside *content*
    and parse the JSON array it returns.

    Returns the parsed list, or None when no JSON array is found or it
    fails to decode.
    """
    reply = call_with_messages(get_edge_from_entity(content, entity), max_token=max_output)
    found = re.search(r'\[(.*?)\]', reply, re.DOTALL)
    if not found:
        print("未找到匹配的 JSON 部分")
        return None
    try:
        # Parse the extracted JSON string into a Python object.
        return json.loads(found.group(0))
    except json.JSONDecodeError as err:
        print(f"JSON 解析错误: {err}")
        return None

def entity_parse(content, max_output=3000):
    """Ask the LLM to list every commercial / public-service ad entity in
    *content* and parse the JSON array it returns.

    Returns the parsed list, or None when no JSON array is found or it
    fails to decode.
    """
    reply = call_with_messages(get_all_entity_prompt(content), max_token=max_output)
    time.sleep(3)  # crude rate limiting between API calls
    print(reply)
    found = re.search(r'\[(.*?)\]', reply, re.DOTALL)
    if not found:
        print("未找到匹配的 JSON 部分")
        return None
    try:
        # Parse the extracted JSON string into a Python object.
        return json.loads(found.group(0))
    except json.JSONDecodeError as err:
        print(f"JSON 解析错误: {err}")
        return None



def public_entity_cls(content, max_output=200):
    """Ask the LLM whether *content* is a public-service advertisement.

    Returns the tuple parsed from the model reply — e.g. ``(1, '宣传主题')``
    or ``(0, '不存在')`` — or None when the reply contains no parsable
    tuple.
    """
    out = call_with_messages(get_public_prompt(content), max_token=max_output)
    # Grab the first parenthesized group, e.g. "(1, '宣传主题')".
    match = re.search(r'\((.*?)\)', out, re.DOTALL)
    if match is None:
        print("未找到匹配的文本")
        return None
    try:
        # literal_eval safely parses the tuple without executing code.
        return ast.literal_eval(match.group(0))
    except (ValueError, SyntaxError):
        # Bug fix: the original bare `except:` swallowed every exception,
        # including KeyboardInterrupt; catch only parse failures.
        print("无法解析字符串为元组")
        return None


def edge_info_parse(content, max_output=3000):
    """Ask the LLM for the start/end text of every ad passage in *content*
    and parse the JSON array it returns.

    Returns the parsed list, or None when no JSON array is found or it
    fails to decode.
    """
    reply = call_with_messages(get_egde_prompt(content), max_token=max_output)
    found = re.search(r'\[(.*?)\]', reply, re.DOTALL)
    if not found:
        print("未找到匹配的 JSON 部分")
        return None
    try:
        # Parse the extracted JSON string into a Python object.
        return json.loads(found.group(0))
    except json.JSONDecodeError as err:
        print(f"JSON 解析错误: {err}")
        return None


def call_with_messages(content, max_token=6):
    """Send *content* as a single user turn to qwen2.5-32b-instruct via the
    DashScope OpenAI-compatible endpoint and return the reply text.

    On any failure (network, auth, API error) the error is printed and the
    sentinel string 'invalid' is returned instead of raising.
    """
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': content},
    ]
    try:
        # If DASHSCOPE_API_KEY is not set in the environment, replace the
        # api_key line with your own key, e.g. api_key="sk-xxx".
        client = OpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )
        # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
        completion = client.chat.completions.create(
            model="qwen2.5-32b-instruct",
            messages=messages,
            max_tokens=max_token,
            temperature=0.7,
        )
        return completion.choices[0].message.content
    except Exception as err:
        print(f"错误信息：{err}")
        print("请参考文档：https://help.aliyun.com/zh/model-studio/developer-reference/error-code")
        return 'invalid'


def get_entity_prompt(content):
    """Build the prompt asking whether the text advertises one single
    product, replying as a tuple: (1, '厂家-产品'), (2, '未知') or
    (0, '不存在').
    """
    template = """
    你是一个广告识别员,结合上下文,判断文本是否通篇在介绍单独一个产品,并分析其厂家名称.
    ------
    规则如下:
    - 如果文本通篇在介绍单个产品且具有明显厂家名称,则返回格式:(1, '厂家名称-产品名称')
    - 如果文本存在多个产品,则返回格式:(2, '未知')
    - 如果文本不存在介绍产品或无明显品牌名称,则返回格式:(0, '不存在')
    ---------
    文本：{content}
    """
    return template.format(content=content)


def get_all_entity_prompt(content):
    """Build the prompt asking the LLM to list every commercial and
    public-service ad in the text as a JSON array of
    {"entity": ..., "type": ...} objects.
    """
    template = """
    你是一个广告内容标注员,列出文本中所有存在的商业广告和公益广告,并分析其主体,以及其类型.
    注意: 不确定的广告不要写入!!注意区分新闻和公益广告,公益广告需要具有宣传公益的广告风格
    ------
    返回格式为:
    [{{
    "entity": "品牌名称-产品",
    "type": "商业广告"
    }},{{
    "entity": "宣传主体",
    "type": "公益广告"
    }}]
    ---------
    文本：{content}
    """
    return template.format(content=content)


def get_public_prompt(content):
    """Build the prompt asking whether the text is a public-service ad,
    replying as a tuple: (1, '宣传主题') or (0, '不存在').
    """
    template = """
    你是一个公益广告识别员,结合上下文,判断文本是否为一段公益广告.
    ------
    规则如下:
    - 如果文本为一段公益广告,则返回格式:(1, '宣传主题')
    - 如果文本不是公益广告,则返回格式:(0, '不存在')
    ---------
    文本：{content}
    """
    return template.format(content=content)


def get_egde_prompt(content):
    """Build the prompt asking the LLM to return the start and end text of
    every ad passage as a JSON array of {"start": ..., "end": ...} objects.

    NOTE(review): the name contains a typo ("egde" for "edge") but is kept
    because callers reference it.
    """
    template = """
    你是一个内容标注员,给定文本,过滤非广告部分.提取所有广告部分的开头和结尾,并返回开头和结尾的文本.
    广告分为商业和公益,商业广告包含介绍产品,公益广告宣传公益
    ------
    文本：{content}
    ------
    若无广告,返回空数组
    若有广告,返回格式如下:
    [{{
    "start": "开头文本",
    "end": "结尾文本"
    }},{{
    "start": "开头文本",
    "end": "结尾文本"
    }}]
    """
    return template.format(content=content)

def get_edge_from_entity(content, entity):
    """Build the prompt asking the LLM to extract, for the given *entity*,
    all ad-related passages as a JSON array of
    {"entity": ..., "type": ..., "text": ...} objects.
    """
    template = """
    你是一个内容标注员,给定{entity},结合上下文找出其中所有的广告相关内容,并返回相关文本.
    ------
    文本：{content}
    ------
    若无广告,返回空数组
    若有广告,返回格式如下:
    [{{
    "entity": "广告主体",
    "type": "广告类型",
    "text": "相关文本"
    }},{{
    "entity": "广告主体",
    "type": "广告类型",
    "text": "相关文本"
    }}]
    """
    return template.format(entity=entity, content=content)

if __name__ == '__main__':
    # Process one channel end to end: load the pre-cut segments, ask the
    # LLM about continuity between consecutive segments, and write the
    # annotated table back to CSV.
    # (Removed a redundant `import ast` here: the classifier functions
    # rely on the module-level import instead.)
    dir_path = "E:\\projects\\江苏数据"
    for channel in os.listdir(dir_path):
        if channel != "227":  # debug filter: process only channel 227
            continue
        cut_csv = os.path.join(dir_path, channel, "tmp.csv")
        out_csv = os.path.join(dir_path, channel, "out.csv")
        transcript_csv = os.path.join(dir_path, channel, "00-00-00-1-transcript.csv")
        # transcript is loaded for the (currently disabled) edge-alignment step.
        transcript = pd.read_csv(transcript_csv)
        df = pd.read_csv(cut_csv)
        merged_info = df.to_dict(orient='records')
        relation_info = generate_content_relation(merged_info)
        # merged_info = concat_content(relation_info)
        # relation_info = find_adv_edge(merged_info, transcript)
        # with open(out_csv[:-4]+".json", "w", encoding="utf-8") as f:
        #     json_data = json.dumps(relation_info, indent=4, ensure_ascii=False)
        #     f.write(json_data)
        relation_info = pd.DataFrame(relation_info)
        relation_info.to_csv(out_csv, index=False)
        break
