# coding:utf-8
import os
import re
from urllib.parse import urlparse

import numpy as np
import requests
from emoji import is_emoji


# def stream2text(stream):
#     """
#     信息流处理,将信息流转换为文本
#     Args:
#         stream:初始的信息流数据
#     Returns:
#         处理后的原始文本
#
#     """
#     data_stream = stream
#     # 初始化一个标志，用于检测是否遇到了"finish"事件
#     finished = False
#     full_text = []
#
#     # 检查数据流是否为空或不包含"data:"，如果是则记录日志并返回空字符串
#     if not data_stream or "data:" not in data_stream:
#         logger.info("数据流为空或不包含'data:'{}".format(data_stream))
#         return "".lstrip()
#
#     # 分割数据流，因为每个事件数据以"data:"开头，所以以此为分隔符
#     events = data_stream.split("data:")[1:]  # 跳过第一个空字符串
#
#     for event_str in events:
#         # 尝试将字符串转换为字典，这里假设event_str不包括"data:"前缀
#         try:
#             event_str = event_str.replace('\n', '')
#             event_dict = json.loads(event_str)
#         except json.JSONDecodeError as e:
#             logger.error("解析jsom出错: {}}".format(e))
#             continue  # 如果解析出错，则跳过此次循环
#
#         # 检查是否为"finish"事件
#         if event_dict.get("event") == "finish":
#             finished = True
#         elif event_dict.get("data") == "> **end-searching**":
#             # 跳过此数据包
#             continue
#         elif event_dict.get("event") == "chat":
#             # 提取"data"字段的值，并解码Unicode序列
#             data_value = event_dict.get("data", "")
#             full_text.append(data_value)
#
#     # 当所有处理完成后，打印完整的文字（确保是在finish之后的chat事件被正确处理）
#     if finished:
#         full_text = ''.join(full_text)
#         full_text = full_text.replace("> **end-searching**", "")
#         full_text = full_text.lstrip()
#         return full_text
#     else:
#         logger.info("未找到'finish'事件,接收到信息流为{}".format(stream))
#         return ''.join(full_text).lstrip()


# def find_match(full_text, patterns):
#     """
#     属于final_text函数,正则表达式匹配模式
#     Args:
#         full_text:需要进行匹配处理的文本
#         patterns: 匹配模式
#     Returns:
#         匹配到的文本
#     """
#     for pattern in patterns:
#         matches = re.findall(pattern, full_text)
#         if matches:
#             return matches
#     return []


# def remove_symbol(text_):
#     text_ = text_.replace(' ', '')
#     text_ = text_.replace('*', '')
#     if "小程序" not in text_:
#         text_ = text_.replace('#', '')
#     # text = text.replace('-', '')
#     text_ = text_.replace('[', '')
#     text_ = text_.replace(']', '')
#     text_ = text_.replace('\\n', '')
#     text_ = text_.replace('\\', '')
#     return text_


# def final_text(full_text):
#     """
#     对原始文本进行处理,匹配"NextWords","Next"或"Words",对多余符号进行处理
#     Args:
#         full_text: 文本
#     Returns:
#         处理后的文本
#     """
#     if "NextWords" in full_text or "Next" in full_text or "Words" in full_text:
#         # 使用正则表达式匹配所有"NextWords"后的内容
#         pattern_1 = r'NextWords.*?[:\s]*(.*)'
#         pattern_2 = r'Next.*?[:\s]*(.*)'
#         pattern_3 = r'Words.*?[:\s]*(.*)'
#
#         patterns = [pattern_1, pattern_2, pattern_3]
#         all_matches = find_match(full_text, patterns)
#
#         # 如果找到至少一个匹配项
#         if all_matches:
#             last_match = all_matches[-1]  # 获取并清理最后一个匹配项
#             last_match = remove_symbol(last_match)
#             # 去除前导的 冒号、双引号、左括号
#             content = last_match.lstrip(':\"({').strip()
#             if content.startswith('"'):
#                 content = content[1:]
#             else:
#                 # 如果没有找到左侧的双引号，直接返回原文本
#                 content = content
#             # 去除尾随的 双引号、右括号、逗号、换行符
#             content = content.rstrip('")},\n')
#             content = content.replace('【undefined】', '')
#             content = content.replace('\n', '')
#             content = content.strip()
#             return content  # 最后再去掉可能的前导和尾随空白
#     else:
#         logger.info("未找到与'NextWords'、'Next'、'Words'对应的内容")
#         lines = full_text.split('\n')
#         # 检查前两行是否分别或共同包含"search"和"mclick"
#         if "search" in full_text and "mclick" in full_text:
#             logger.info("检测到search和mclick字段")
#             lines = [line for line in lines if not ("search" in line or "mclick" in line)]
#             lines = lines[1:]  # 去掉第一行的 json 字段
#             full_text = '\n'.join(lines)
#
#         # 检查是否包含"IntentionStag"
#         if "IntentionStage" in full_text:
#             logger.info("检测到IntentionStage字段")
#             lines = lines[6:]
#             full_text = '\n'.join(lines)
#
#         if "task" in full_text and "reference" in full_text:
#             logger.info("检测到task...字段reference...")
#             full_text = re.sub(r'\{.*?\}', '', full_text, flags=re.DOTALL)
#             lines = full_text.split('\n')
#             lines = lines[1:]
#             full_text = '\n'.join(lines)
#
#         else:
#             # logger.info("未检测到特别字段")
#             return ""
#
#         # 合并剩下的行
#         filtered_text = remove_symbol(full_text)
#         return filtered_text


# def cut_text(text, min_len, step=50):
#     """
#     长文本截断
#     """
#     # print(min_len)
#     text = text.replace('\n\n', '\n')
#
#     if "1." in text and "2." in text:   # 有序列表截断
#         # 查找最后一个数字列表项的正则表达式
#         pattern = r"(\d\..*?)\n"
#         matches = list(re.finditer(pattern, text, re.DOTALL))
#
#         if matches:
#             # 获取最后一个匹配的结束位置
#             last_end = matches[-1].end()
#             # 分割文本
#             first_part = text[:last_end].strip()
#             second_part = text[last_end:].strip()
#             return [first_part, second_part]
#         else:
#             # 如果没有找到匹配项，返回整个文本作为一个元素的列表
#             return [text.strip()]
#     else:  # 非有序列表截断
#         split_patten = '[' + ''.join(['。']) + ']'
#         split_text = re.split(split_patten, text)
#
#         len_list = np.array([len(x) for x in split_text])
#         sum_len_list = np.cumsum(len_list)
#         result_list = []
#         end_point = 0
#         pre_index = 0
#
#         while end_point <= sum_len_list[-1]:
#             end_point += step
#             now_index = find_now_index(end_point, sum_len_list)
#             if np.sum(len_list[pre_index:now_index]) >= min_len:
#                 result_list.append('。'.join(split_text[pre_index:now_index]))
#                 pre_index = now_index
#                 for i in range(now_index):
#                     split_text[i] = None
#
#         for pre_index in range(len(split_text)):
#             if split_text[pre_index] is not None:
#                 result_list.append(split_text[pre_index])
#
#         result_list = [item.strip() for item in result_list]
#
#         for _ in range(2):
#             # 对分段后的最后一段进行判断
#             if len(result_list) < 2:
#                 return result_list
#             second_last_item_length = len(result_list[-2])
#             last_item_length = len(result_list[-1])
#             if last_item_length < second_last_item_length / 2:
#                 if not is_emoji(result_list[-1]) and '[' not in result_list[-1]:
#                     result_list[-2] += "，"
#                 result_list[-2] += result_list[-1]
#                 result_list.pop()
#                 result_list[-1] = result_list[-1][:-1] + '。' if result_list[-1].endswith('，') else result_list[-1]
#         return result_list


def cut_text(text, max_len):
    """
    Split Chinese text into segments of at most max_len characters,
    cutting only at sentence-ending punctuation (。！？).

    Args:
        text: Input text to split.
        max_len: Maximum length of each returned segment.

    Returns:
        list[str]: Segments in original order. A trailing segment no longer
        than max_len / 3 is merged into the previous segment (joined with a
        space) so the last chunk is not disproportionately short.
    """
    # The capturing group keeps each sentence terminator as its own list
    # element: ['sent', '。', 'sent', '！', ...].
    pieces = re.split('([。！？])', text)

    # Re-attach each terminator to its sentence; drop empty leftovers
    # (re.split yields a trailing '' when text ends with a terminator).
    sentences = []
    for i in range(0, len(pieces), 2):
        sentence = pieces[i]
        if i + 1 < len(pieces):
            sentence += pieces[i + 1]
        sentence = sentence.strip()
        if sentence:
            sentences.append(sentence)

    # Greedily pack whole sentences into segments of at most max_len.
    segments = []
    current = ""
    for sentence in sentences:
        if len(current) + len(sentence) <= max_len:
            current += sentence
        else:
            if current:  # never emit empty segments
                segments.append(current)
            current = sentence

    # Handle the final segment: keep it when reasonably long, otherwise fold
    # it into the previous one. Bug fix: the original indexed len_2[-1] even
    # when len_2 was empty, raising IndexError for short single-sentence input.
    if current:
        if segments and len(current) <= max_len / 3:
            segments[-1] += " " + current
        else:
            segments.append(current)

    return segments


def find_now_index(now_point, sum_len_list):
    """
    Locate the half-open cumulative-length window containing now_point.

    Args:
        now_point: Position (character offset) to locate.
        sum_len_list: Cumulative segment lengths, assumed non-decreasing.

    Returns:
        int: idx (1-based) such that
        sum_len_list[idx - 1] <= now_point < sum_len_list[idx],
        or 0 when no such window exists.
    """
    for idx in range(1, len(sum_len_list)):
        if sum_len_list[idx - 1] <= now_point < sum_len_list[idx]:
            return idx
    return 0


def split_text_with_links(text):
    """
    Split a string containing text and URLs into a list.

    Args:
        text (str): Input string mixing plain text and URLs.

    Returns:
        list: Alternating plain-text and URL elements, in original order.
        Empty strings appear when the text starts/ends with a URL,
        matching the original output shape.
    """
    # Splitting with a capturing group keeps each URL as its own element.
    # Bug fix: the original findall + str.replace placeholder round-trip
    # corrupted the output when one URL was a prefix of a later, longer URL
    # (replace also consumed the longer URL's prefix, leaving residue such
    # as '/path' as bare text), and collided when the input already
    # contained the literal placeholder token.
    url_pattern = r'(https?://[\w.\-/]+)'
    return re.split(url_pattern, text)


def download_file_from_url(url, local_filename=None):
    """
    Download a file from a URL into the local 'downloads' folder.
    Observed usage covers mp4, pdf and txt files.

    Args:
        url (str): URL of the file to download.
        local_filename (str, optional): Filename to save under. When omitted,
            the basename of the URL *path* is used (query string and fragment
            are stripped, unlike the original, which could produce invalid
            filenames such as 'file.mp4?token=x').

    Returns:
        str: Path of the downloaded (or previously cached) file.

    Raises:
        requests.HTTPError: When the server responds with a non-2xx status.
    """
    download_folder = 'downloads'
    os.makedirs(download_folder, exist_ok=True)

    if local_filename is None:
        # Derive the name from the URL path only; fall back to a fixed name
        # when the path ends with '/' and the basename is empty.
        local_filename = os.path.basename(urlparse(url).path) or 'download'
    full_local_path = os.path.join(download_folder, local_filename)

    # Existence check acts as a cache: never re-download the same filename.
    if os.path.exists(full_local_path):
        return full_local_path

    with requests.get(url, stream=True) as r:
        r.raise_for_status()  # non-200 responses raise HTTPError
        with open(full_local_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)
    return full_local_path
