import os
import json
from datetime import time
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from openai import APIConnectionError
from datasketch import MinHash, MinHashLSH
from simhash import Simhash, SimhashIndex
from processnews import split_text_into_paragraphs

from partofproduct import split_news

# Route OpenAI traffic through a proxy endpoint.
os.environ["OPENAI_API_BASE"] = 'https://oneapi.xty.app/v1'
# GPT-3.5 key (presumably — label inherited from original comment "3.5")
# SECURITY NOTE(review): API key hard-coded in source. Rotate this key and load
# it from the environment or a secrets manager instead of committing it.
os.environ["OPENAI_API_KEY"] = 'sk-VEDbE0OfLniuK7eJEb7067B883944d57B1Dc2a638f8f7bBb'


# GPT-4 key (presumably — label inherited from original comment "4")
# os.environ["OPENAI_API_KEY"] ='sk-SS3E9A82RI9P3rUg51E24031001949D59dBa51E0CeB6D3Fb'


def get_story():
    """Ask the chat model for a short story of ~200 Chinese characters.

    Retries once after a 10-second pause on connection errors.

    Returns:
        str: the string form of the model response, or "None" if both
        attempts failed with a connection error.
    """
    # Local import: the module-level `from datetime import time` shadows the
    # `time` module, so the original `time.sleep(10)` raised AttributeError.
    import time

    chat = ChatOpenAI(temperature=0.0)
    num = 200
    # Plain template string: let ChatPromptTemplate substitute {num}.  The
    # original used an f-string, which pre-filled the value and left the
    # template with no variable for format_messages() to fill.
    template_string = """
        讲个故事，{num}汉字。
                            """
    prompt_template = ChatPromptTemplate.from_template(template_string)
    prompts = prompt_template.format_messages(num=num)
    print(prompts)

    # Initialize so `response` is bound even if every attempt raises
    # (the original hit NameError after two failed attempts).
    response = None
    for _attempt in range(2):
        try:
            response = chat(prompts)
            print(response)
        except APIConnectionError:
            time.sleep(10)  # back off before retrying
            continue
        else:
            break
    return str(response)


# 创建一个 MinHash 对象的函数
def create_minhash(data):
    """Build a MinHash signature from an iterable of string tokens.

    Each token is UTF-8 encoded and folded into the signature.
    """
    signature = MinHash(num_perm=128)  # 128 hash permutations (tune as needed)
    for token in data:
        signature.update(token.encode('utf8'))
    return signature


# 给定一组段落，创建MinHash对象并插入到LSH中，然后查找并去除重复或高度相似的段落
def deduplicate_paragraphs(paragraphs):
    """Remove near-duplicate paragraphs using MinHash LSH.

    Keeps the FIRST member of each similarity cluster and drops later
    near-duplicates.  (The original dropped *every* member of a cluster,
    so similar content disappeared entirely.)  MinHash signatures are now
    computed once and reused, instead of twice per paragraph.

    Args:
        paragraphs: list of paragraph strings.

    Returns:
        tuple: (unique_paragraphs, paragraph_map) where paragraph_map maps
        the LSH key "p<idx>" to the original paragraph text.
    """
    lsh = MinHashLSH(threshold=0.5, num_perm=128)  # similarity threshold for duplicates
    paragraph_map = {}
    minhashes = []  # cached signatures, reused in the query pass

    for idx, paragraph in enumerate(paragraphs):
        minhash = create_minhash(list(paragraph))  # per-character shingles
        minhashes.append(minhash)
        lsh.insert(f"p{idx}", minhash)
        paragraph_map[f"p{idx}"] = paragraph

    unique_paragraphs = []
    dropped = set()  # keys removed as duplicates of an earlier paragraph

    for idx, paragraph in enumerate(paragraphs):
        key = f"p{idx}"
        if key in dropped:
            continue
        similar_keys = lsh.query(minhashes[idx])
        similar_keys.remove(key)  # ignore the self-match
        if similar_keys:
            similar_texts = [paragraph_map[k] for k in similar_keys]
            print(f"段落 {idx}：'{paragraph}' 与以下段落相似，已被去除：{similar_texts}")
            dropped.update(similar_keys)  # drop later members of this cluster
        # the first occurrence of each cluster is always kept
        unique_paragraphs.append(paragraph)

    return unique_paragraphs, paragraph_map


def create_simhash(text):
    """Return the Simhash fingerprint of *text*."""
    fingerprint = Simhash(text)
    return fingerprint


# def deduplicate_paragraphs_simhash(paragraphs):
#     objs = [(str(idx), create_simhash(paragraph)) for idx, paragraph in enumerate(paragraphs)]
#     index = SimhashIndex(objs, k=17)  # k是汉明距离的阈值
#
#     unique_paragraphs = []
#     seen = set()  # 用于记录已经处理过的段落
#
#     for idx, paragraph in enumerate(paragraphs):
#         if idx in seen:
#             continue  # 如果当前段落已处理，则跳过
#
#         simhash = create_simhash(paragraph)
#         near_duplicates = index.get_near_dups(simhash)
#
#         # 检查是否有重复的段落，并更新seen集合
#         if len(near_duplicates) > 1:
#             for dup in near_duplicates:
#                 seen.add(int(dup))
#
#             # 只添加第一个遇到的段落到结果集中
#             unique_paragraphs.append(paragraph)
#
#             # 打印重复段落信息
#             print(f"段落 {idx}：'{paragraph}' 与以下段落相似，已被去除：")
#             for dup in near_duplicates:
#                 if int(dup) != idx:
#                     print(f"  相似段落 {dup}: '{paragraphs[int(dup)]}'")
#         else:
#             unique_paragraphs.append(paragraph)
#             seen.add(idx)
#
#     return unique_paragraphs

def deduplicate_paragraphs_simhash(paragraphs_with_news_id):
    """Drop near-duplicate paragraphs, keeping the first of each cluster.

    Args:
        paragraphs_with_news_id: list of tuples whose FIRST element is the
            paragraph text; any trailing elements (news id, paragraph index,
            ...) are carried through untouched.  Generalized from strict
            2-tuples so the 3-tuples produced by test_by_news_v2 no longer
            raise ValueError during unpacking.

    Returns:
        list: the input tuples with near-duplicates removed, in input order.
    """
    objs = [(str(idx), create_simhash(item[0]))
            for idx, item in enumerate(paragraphs_with_news_id)]
    index = SimhashIndex(objs, k=17)  # k is the Hamming-distance threshold

    unique_items = []
    seen = set()  # indices already kept or dropped as duplicates

    for idx, item in enumerate(paragraphs_with_news_id):
        if idx in seen:
            continue

        paragraph = item[0]
        simhash = create_simhash(paragraph)
        near_duplicates = index.get_near_dups(simhash)

        # get_near_dups includes the paragraph itself, so >1 means real dups
        if len(near_duplicates) > 1:
            print(f"段落 {idx}：'{paragraph}' 与以下段落相似，已被去除：")
            for dup in near_duplicates:
                if int(dup) != idx:
                    print(f"  相似段落 {dup}: '{paragraphs_with_news_id[int(dup)][0]}'")

            # keep only the first member of the cluster
            unique_items.append(item)
            for dup in near_duplicates:
                seen.add(int(dup))
        else:
            unique_items.append(item)
            seen.add(idx)

    return unique_items


# def test_by_news(news):
#
#     paragraphs = []
#
#     for new in news:
#         paragraph = split_text_into_paragraphs(new["content"], 200)
#         for con in paragraph:
#             paragraphs.append(con)
#
#
#     # 执行去重
#     # unique_paragraphs, paragraph_map = deduplicate_paragraphs(paragraphs)
#     unique_paragraphs = deduplicate_paragraphs_simhash(paragraphs)
#
#     # 打印结果
#     print("去重后的段落：")
#     for paragraph in unique_paragraphs:
#         print(paragraph)
#
#     return None

def test_by_news(news):
    """Deduplicate paragraphs across *news* and rebuild each article.

    Splits every article's content into ~200-character paragraphs, removes
    near-duplicate paragraphs globally via SimHash, then concatenates the
    survivors back into their source article (order preserved, no separator).

    Args:
        news: list of dicts, each with at least a "content" key.

    Returns:
        list: copies of the input dicts with deduplicated "content".
    """
    tagged_paragraphs = []
    for article_idx, article in enumerate(news):
        for chunk in split_text_into_paragraphs(article["content"], 200):
            tagged_paragraphs.append((chunk, article_idx))

    survivors = deduplicate_paragraphs_simhash(tagged_paragraphs)

    # Shallow-copy each article dict so the inputs stay untouched.
    # NOTE(review): nested values are still shared with the input dicts.
    rebuilt = [article.copy() for article in news]
    for article in rebuilt:
        article["content"] = ""  # cleared, then refilled with survivors

    # Reattach surviving paragraphs to their source article, in order.
    for chunk, article_idx in survivors:
        rebuilt[article_idx]["content"] += chunk

    return rebuilt


def test_by_news_v2(news):
    """Deduplicate paragraphs across *news* and return rebuilt article texts.

    Unlike test_by_news, each rebuilt article is returned as a plain string
    whose surviving paragraphs are joined by newlines, in original order.

    Bug fix: the original passed 3-tuples (paragraph, news_id, index) to
    deduplicate_paragraphs_simhash, whose unpacking expects 2-tuples and
    raised ValueError.  The (news_id, index) pair is now packed into the
    tuple's single id slot, matching the expected 2-tuple shape.

    Args:
        news: list of dicts, each with at least a "content" key.

    Returns:
        list[str]: one deduplicated text per article that kept at least
        one paragraph.
    """
    paragraph_items = []
    for news_id, article in enumerate(news):
        chunks = split_text_into_paragraphs(article["content"], 200)
        for chunk_index, chunk in enumerate(chunks):
            # 2-tuple shape expected by deduplicate_paragraphs_simhash
            paragraph_items.append((chunk, (news_id, chunk_index)))

    unique_items = deduplicate_paragraphs_simhash(paragraph_items)

    # Group surviving paragraphs by article, remembering in-article order.
    news_dict = {}
    for chunk, (news_id, chunk_index) in unique_items:
        news_dict.setdefault(news_id, []).append((chunk_index, chunk))

    reconstructed_news = []
    for news_id, entries in news_dict.items():
        entries.sort(key=lambda pair: pair[0])  # restore original order
        reconstructed_news.append("\n".join(chunk for _, chunk in entries))

    return reconstructed_news