from typing import Union, Optional, Tuple

import dspy

from .callback import BaseCallbackHandler
from .storm_dataclass import StormInformationTable, StormArticle
from ...interface import OutlineGenerationModule
from ...utils import ArticleTextProcessing
from .savetemp import savetemp


# Embedding extraction + clustering
import numpy as np
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

import numpy as np

# cluster
from sklearn.cluster import AgglomerativeClustering,KMeans,OPTICS
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score

import umap
import argparse
from tqdm import tqdm
import time
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor

def extract_data_for_clustering(data, top_k=1000):
    """Split embedded paper records into an embedding matrix and metadata.

    Args:
        data: Sequence of (title, abstract, link, embedding) tuples.
        top_k: Keep at most this many leading records (default 1000).

    Returns:
        Tuple of (vector_matrix, titles_abstracts):
        - vector_matrix: 2-D numpy array, one embedding per row.
        - titles_abstracts: list of (title, abstract, link) tuples in the
          same order as the matrix rows.
    """
    # Python slicing clamps to the sequence length, so no explicit min().
    kept = data[:top_k]

    # Metadata first three fields; embedding is the fourth.
    titles_abstracts = [(record[0], record[1], record[2]) for record in kept]
    vector_matrix = np.array([record[3] for record in kept])

    return vector_matrix, titles_abstracts


def cluster_papers(embeddings, titles_abstracts, n_clusters, cluster_method, is_umap, do_bic, do_silhouette):
    """Cluster paper embeddings and group their metadata by cluster.

    Args:
        embeddings: 2-D array of embedding vectors, one row per paper.
        titles_abstracts: List of (title, abstract, link) tuples aligned
            with the rows of ``embeddings``.
        n_clusters: Requested cluster count (used by 'Kmeans'; the 'GMM'
            path currently hard-codes its component count -- see below).
        cluster_method: One of 'Kmeans', 'HCL', 'OPTICS', 'GMM'.
        is_umap: If True, reduce embeddings with UMAP before clustering.
        do_bic: Currently unused; the BIC-based GMM model selection is
            disabled (parameter kept for interface compatibility).
        do_silhouette: Currently unused; silhouette-based selection is
            likewise disabled.

    Returns:
        Tuple of (cluster_output, embeddings, cluster_assignment):
        - cluster_output: dict mapping str(cluster_id) -> list of dicts
          with 'title', 'abstract', 'link', largest cluster first.
        - embeddings: the (possibly UMAP-reduced) embedding matrix.
        - cluster_assignment: per-paper iterable of cluster ids (soft
          memberships for GMM, single-element lists otherwise).

    Raises:
        ValueError: If ``cluster_method`` is not a supported name.
    """
    if is_umap:
        # n_neighbors ~ sqrt(N), capped at 50; fixed seed for reproducibility.
        embeddings = umap.UMAP(
            n_neighbors=np.min([int((len(embeddings) - 1) ** 0.5), 50]),
            n_components=20,
            min_dist=0,
            metric='cosine',
            random_state=42,
        ).fit_transform(embeddings)

    if cluster_method == 'Kmeans':
        clustering_model = KMeans(n_clusters=n_clusters)

    elif cluster_method == 'HCL':  # Renamed from 'HCl' for consistency
        # Cluster count derives from distance_threshold, not a fixed number.
        clustering_model = AgglomerativeClustering(distance_threshold=2,
                                                   n_clusters=None)

    elif cluster_method == 'OPTICS':
        clustering_model = OPTICS(metric='cosine', min_samples=2)

    elif cluster_method == 'GMM':
        covariance_type = 'full'
        threshold = 0.1  # soft-assignment posterior-probability cut-off
        # NOTE(review): the BIC / silhouette selection over a cluster range
        # was disabled; the component count is hard-coded to 80 and
        # n_clusters is ignored on this path.
        optimal_n_clusters = 80
        clustering_model = GaussianMixture(n_components=optimal_n_clusters,
                                           covariance_type=covariance_type,
                                           random_state=42)
        print(f"Optimal number of clusters (GMM): {optimal_n_clusters}")

    else:  # Handle invalid clustering methods
        raise ValueError("Invalid clustering method specified")

    clustering_model.fit(embeddings)
    if cluster_method == 'GMM':
        # Soft assignment: a paper belongs to every component whose posterior
        # probability exceeds the threshold (possibly several, possibly none).
        probabilities = clustering_model.predict_proba(embeddings)
        cluster_assignment = [np.where(p > threshold)[0] for p in probabilities]
    else:
        # Bug fix: hard labels from .labels_ are scalar ints, but the grouping
        # loop below iterates over each paper's memberships -- iterating a
        # bare numpy int raises TypeError. Wrap each label in a 1-element list
        # so both branches yield the same per-paper shape.
        cluster_assignment = [[label] for label in clustering_model.labels_]

    # Group paper metadata by cluster id.
    clusters = dict()
    for paper_id, cluster_names in enumerate(cluster_assignment):
        for cluster in cluster_names:
            if cluster not in clusters:
                clusters[cluster] = []
            clusters[cluster].append(titles_abstracts[paper_id])

    # Largest clusters first.
    clusters = dict(sorted(clusters.items(), key=lambda item: len(item[1]), reverse=True))

    # Prepare JSON-friendly output keyed by the stringified cluster id.
    # NOTE(review): a comment in the original said clusters with < 3 papers
    # should be dropped, but no filtering was implemented; behavior kept as-is.
    cluster_output = {}
    for cluster_id, papers in clusters.items():
        cluster_output[str(cluster_id)] = [
            {'title': paper[0], 'abstract': paper[1], 'link': paper[2]} for paper in papers
        ]

    return cluster_output, embeddings, cluster_assignment







class StormOutlineGenerationModule(OutlineGenerationModule):
    """
    The interface for outline generation stage. Given topic, collected information from knowledge
    curation stage, generate outline for the article.
    """

    def __init__(self, outline_gen_lm: Union[dspy.dsp.LM, dspy.dsp.HFModel]):
        super().__init__()
        self.outline_gen_lm = outline_gen_lm
        self.write_outline = WriteOutline(engine=self.outline_gen_lm)
        self.encoder = SentenceTransformer("paraphrase-MiniLM-L6-v2")  # sentence-embedding model used to vectorize title/abstract text

    def generate_outline(
        self,
        topic: str,
        information_table: StormInformationTable,
        old_outline: Optional[StormArticle] = None,
        callback_handler: BaseCallbackHandler = None,
        return_draft_outline=False,
    ) -> Union[StormArticle, Tuple[StormArticle, StormArticle]]:
        """
        Generates an outline for an article based on the specified topic and the information
        gathered during the knowledge curation stage. This method can optionally return both the
        final article outline and a draft outline if required.

        Args:
            topic (str): The topic of the article.
            information_table (StormInformationTable): The information table containing the collected information.
            old_outline (Optional[StormArticle]): An optional previous version of the article outline that can
                be used for reference or comparison. Defaults to None.
            callback_handler (BaseCallbackHandler): An optional callback handler that can be used to trigger
                custom callbacks at various stages of the outline generation process, such as when the information
                organization starts. Defaults to None.
            return_draft_outline (bool): A flag indicating whether the method should return both the final article
                outline and a draft version of the outline. If False, only the final article outline is returned.
                Defaults to False.

        Returns:
            Union[StormArticle, Tuple[StormArticle, StormArticle]]: Depending on the value of `return_draft_outline`,
                this method returns either a single `StormArticle` object containing the final outline or a tuple of
                two  `StormArticle` objects, the first containing the final outline and the second containing the
                draft outline.
        """
        start_time = time.time()  # start timing the data-extraction + clustering stage

        if callback_handler is not None:
            callback_handler.on_information_organization_start()

        # Flatten the per-conversation lists of turns into one list.
        concatenated_dialogue_turns = sum(
            [conv for (_, conv) in information_table.conversations], []
        )

        # Collect title / abstract (joined snippets) / link from every search result.
        data_for_embedding = [
            {
                "title": result.title,
                "abstract":" ".join(result.snippets),
                "link": result.url,
            }
            for turn in concatenated_dialogue_turns
            for result in turn.search_results
        ]

        # Text to embed: "title. abstract" for each record.
        text_contents = [f"{item['title']}. {item['abstract']}" for item in data_for_embedding]

        # Compute sentence embeddings for all records.
        embeddings = self.encoder.encode(text_contents, show_progress_bar=True)

        # Pair each record with its embedding vector.
        embedded_data = [
            (item["title"], item["abstract"], item["link"], np.array(embedding))
            for item, embedding in zip(data_for_embedding, embeddings)
        ]

        #### Cluster the embedding vectors ####
        vector_matrix, titles_abstracts = extract_data_for_clustering(embedded_data,top_k=1000)

        n_clusters = len(vector_matrix) // 10 #Max number of clusters is 10% of the number of papers
        cluster_method = 'GMM'
        is_umap = True
        do_bic = False
        do_silhouette = True
        # Cluster the papers
        cluster_output,embeddings_after_cluster,cluster_assignment = cluster_papers(vector_matrix,
                                                                                    titles_abstracts,
                                                                                    n_clusters,
                                                                                    cluster_method,
                                                                                    is_umap,
                                                                                    do_bic,
                                                                                    do_silhouette)
        end_time = time.time()  # stop timing
        elapsed_time = end_time - start_time  # elapsed seconds
        print(f"Extract Data and Cluster Time {elapsed_time:.2f} seconds")  # report stage timing

        # Run WriteOutline over the clusters and conversation turns to produce the outline.
        result = self.write_outline(
            cluster_output=cluster_output,
            topic=topic,
            dlg_history=concatenated_dialogue_turns,
            callback_handler=callback_handler,
        )
        article_with_outline_only = StormArticle.from_outline_str(
            topic=topic, outline_str=result.outline
        )
        article_with_draft_outline_only = StormArticle.from_outline_str(
            topic=topic, outline_str=result.old_outline
        )
        if not return_draft_outline:
            return article_with_outline_only
        return article_with_outline_only, article_with_draft_outline_only


def generate_outline_for_papers(topic, papers):
    """Generate a single section title for one cluster of papers.

    Runs a ``dspy.Predict`` over the ``WritePageOutline`` signature.

    Args:
        topic: Topic of the overall literature review.
        papers: Newline-joined "Title:/Abstract:" text for one cluster.

    Returns:
        The generated section title with surrounding whitespace stripped.
    """
    # Bug fix: the original decorated this module-level function with
    # @staticmethod, making the bound name a staticmethod object. Those are
    # not directly callable before Python 3.10, so executor.submit(
    # generate_outline_for_papers, ...) in WriteOutline.forward would raise
    # TypeError there. A plain function is correct at module scope.
    model = dspy.Predict(WritePageOutline)  # run inference via DSPy
    result = model(topic=topic, papers=papers)
    return result.outline.strip()

# Modified flow: first generate a section-level title for each cluster, then merge
# the titles into an initial outline before running the original refinement step.
class WriteOutline(dspy.Module):
    """Generate the outline for structured long literature review."""

    def __init__(self, engine: Union[dspy.dsp.LM, dspy.dsp.HFModel]):
        super().__init__()
        # NOTE(review): draft_page_outline and order_page_outline are not
        # referenced in forward() below (the ordering step is commented out).
        self.draft_page_outline = dspy.Predict(WritePageOutline)
        self.order_page_outline = dspy.Predict(WriteOrderOutline)
        self.write_page_outline = dspy.Predict(WritePageOutlineFromConv)
        self.engine = engine

    def forward(
        self,
        topic: str,
        dlg_history,
        cluster_output,
        old_outline: Optional[str] = None,
        callback_handler: BaseCallbackHandler = None,
    ):
        """Produce a refined outline plus the draft it was refined from.

        Args:
            topic: Topic of the literature review.
            dlg_history: Dialogue turns (objects with ``user_utterance`` and
                ``agent_utterance`` attributes) from knowledge curation.
            cluster_output: Mapping of cluster id -> list of paper dicts
                ('title', 'abstract', 'link'); used to draft one section
                title per cluster when no old outline is supplied.
            old_outline: Optional pre-existing draft outline string.
            callback_handler: Optional hooks fired after draft generation
                and after refinement.

        Returns:
            dspy.Prediction with ``outline`` (refined) and ``old_outline``
            (draft) strings.
        """

        # Drop turns that are only meta-talk about "the topic you ..." since
        # they carry no content for the outline.
        trimmed_dlg_history = []
        for turn in dlg_history:
            if (
                "topic you" in turn.agent_utterance.lower()
                or "topic you" in turn.user_utterance.lower()
            ):
                continue
            trimmed_dlg_history.append(turn)
        conv = "\n".join(
            [
                f"Wikipedia Writer: {turn.user_utterance}\nExpert: {turn.agent_utterance}"
                for turn in trimmed_dlg_history
            ]
        )
        conv = ArticleTextProcessing.remove_citations(conv)
        conv = ArticleTextProcessing.limit_word_count_preserve_newline(conv, 5000)

        # Persist the trimmed conversation for debugging / inspection.
        savetemp("outline_generation/conv.txt", conv)

        with dspy.settings.context(lm=self.engine):

        ######## Parallelize the per-cluster LLM calls to speed up processing ########
            # Generate the initial (draft) outline here when none was provided.
            if old_outline is None:
                with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
                    results = []

                    for item in cluster_output.items():

                            # print(f"Cluster {item[0]} - Number of Papers: {len(item[1])}")
                            papers = "\n".join([f"Title: {paper['title']}\nAbstract: {paper['abstract']}" for paper in item[1]])
                            # print("\n".join([f"- {paper['title']}" for paper in item[1]]))
                            # print("-" * 50)

                            future = executor.submit(generate_outline_for_papers, topic, papers)
                            results.append(future)

                    # Combine all generated titles into the initial outline.
                    # NOTE(review): as_completed yields in completion order, so
                    # the draft's section order is nondeterministic across runs.
                    generated_outlines = []
                    for future in concurrent.futures.as_completed(results):
                            firsttitle=future.result()
                            # print(f"Generated First Outline for Outline {firsttitle}")
                            generated_outlines.append(firsttitle)


                    old_outline = "\n".join([f"# {outline}" for outline in generated_outlines])

            # Clean old_outline: drop every line that does not start with "#".
            old_outline = "\n".join(line for line in old_outline.splitlines() if line.startswith("#"))

            # (Ordering of the combined draft outline is currently disabled.)

            print("****** Directed ordered Outline *****")
            print(f"Final Old Outline:\n{old_outline}\n{'='*50}")  # inspect the final draft outline
            print("***************************************")

            # Draft (old_outline) generation finished.
            if callback_handler:
                callback_handler.on_direct_outline_generation_end(
                    outline=old_outline
                )
            outline = ArticleTextProcessing.clean_up_outline(
                self.write_page_outline(
                    topic=topic, old_outline=old_outline, conv=conv
                ).outline
            )
            if callback_handler:
                callback_handler.on_outline_refinement_end(outline=outline)

        return dspy.Prediction(outline=outline, old_outline=old_outline)


# This class was changed to generate a single cluster title (+ description).
class WritePageOutline(dspy.Signature):
    """Generate a concise title for a structured long literature review section.
    Here is the format of your writing:
    1. Generate only one title for the given set of papers.
    2. The title should be informative and concise.
    3. Do not include the topic name itself in the title.
    4. Do not add any additional markdown symbols (such as "#", "**", or "###") or extra text in front of the title that you generate.
    """

    # NOTE: the docstring above is the LLM task prompt (dspy uses the
    # Signature docstring as instructions) -- edit it only to change behavior.
    topic = dspy.InputField(prefix="The topic of structured long literature review you want to write: ", format=str)
    papers = dspy.InputField(prefix="Relevant papers for this section:\n", format=str)
    outline = dspy.OutputField(prefix="Generated section title:\n", format=str)


# Order the outline titles based on each title's description.
class WriteOrderOutline(dspy.Signature):
    """Sort and structure a long literature review outline based on logical order.
    
    Here is the format of your writing:
    1. Sort the Unordered outline based on their descriptions to ensure a coherent logical new outline.
    2. Use "#" for section titles, like "# ABC"
    3. Do not include descriptions in the final output, only the sorted titles.
    """

    # NOTE: the docstring above is the LLM task prompt (dspy Signature
    # docstrings become instructions). This signature is instantiated in
    # WriteOutline.__init__ but appears unused by forward() in this file.
    old_outline = dspy.InputField(prefix="Unordered outline with descriptions:\n", format=str)
    outline = dspy.OutputField(prefix="Ordered outline:\n", format=str)


class NaiveOutlineGen(dspy.Module):
    """Generate the outline with LLM's parametric knowledge directly."""

    def __init__(self):
        super().__init__()
        # Single zero-shot predictor over the section-title signature.
        self.write_outline = dspy.Predict(WritePageOutline)

    def forward(self, topic: str):
        # Run the predictor on the topic alone and repackage its outline field.
        prediction = self.write_outline(topic=topic)
        return dspy.Prediction(outline=prediction.outline)


class WritePageOutlineFromConv(dspy.Signature):

    # The official structural requirements for the review were added to the
    # prompt below. (dspy uses this docstring as the LLM task instructions.)
    """
    Improve an outline for a structured long literature review. You already have a draft outline that covers the general information.Now you want to improve it based on the information learned from an information-seeking conversation to make it more informative.
    Ensure that the review is concise in structure, clear in logic, with a reasonable arrangement of content in each section, and that the transitions between adjacent sections are smooth and free of redundancy.
    Here is the format of your writing:
    1. Use "#" Title" to indicate section title, "##" Title" to indicate subsection title, "###" Title" to indicate subsubsection title, and so on.
    2. Do not include other information.
    3. Do not include topic name itself in the outline.
    """

    topic = dspy.InputField(prefix="The topic you want to write: ", format=str)
    conv = dspy.InputField(prefix="Conversation history:\n", format=str)
    # NOTE(review): old_outline is declared as an OutputField even though
    # WriteOutline.forward() supplies it as a value -- this injects the draft
    # outline into the completion portion of the prompt. Looks deliberate;
    # confirm against the installed dspy version before changing to InputField.
    old_outline = dspy.OutputField(prefix="Current outline:\n", format=str)
    outline = dspy.OutputField(
        prefix='Write the improved outline for this structured long literature review (Use "#" Title" to indicate section title, "##" Title" to indicate subsection title, ...):\n',
        format=str,
    )