"""
Author: Lucas
Date: 2024-05-11 08:19:44
LastEditors: Lucas
LastEditTime: 2024-05-11 08:19:57
Description: utilities for earnings-transcript retrieval, RAPTOR-style
    recursive clustering/summarization, and 10-K report analysis
"""

import json
import os
import re
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import umap
import yfinance as yf
from langchain import hub
from langchain.prompts import ChatPromptTemplate
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
# from langchain_text_splitters import RecursiveCharacterTextSplitter
from pandas.tseries.offsets import DateOffset
from sec_api import ExtractorApi
from sklearn.mixture import GaussianMixture
from tenacity import retry, stop_after_attempt, wait_random_exponential

from const import fmp_api_key, sec_api_key

USE_CACHE = True
RANDOM_SEED = 224  # Fixed seed for reproducibility

def correct_date(yr, dt):
    """Fix transcripts whose timestamp carries the wrong year.

    Args:
        yr (int): the actual (expected) year of the transcript.
        dt (str): timestamp string in "%Y-%m-%d %H:%M:%S" format.

    Returns:
        str: the timestamp with its year replaced by ``yr``, same format.
    """
    parsed = datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
    if parsed.year != yr:
        try:
            parsed = parsed.replace(year=yr)
        except ValueError:
            # BUG FIX: Feb 29 cannot be moved to a non-leap year; clamp to
            # Feb 28 instead of raising ValueError.
            parsed = parsed.replace(year=yr, day=28)
    return parsed.strftime("%Y-%m-%d %H:%M:%S")


def extract_speakers(cont: str) -> List[str]:
    """Extract the unique speaker names from a transcript.

    A speaker is any run of text between a newline and the next colon
    (non-greedy), e.g. "\nJohn Doe:" yields "John Doe".

    Args:
        cont (str): transcript content.

    Returns:
        List[str]: unique speakers, in order of first appearance.
    """
    pattern = re.compile(r"\n(.*?):")
    matches = pattern.findall(cont)
    # BUG FIX: list(set(...)) produced a nondeterministic order;
    # dict.fromkeys dedupes while preserving first-appearance order.
    return list(dict.fromkeys(matches))


@retry(wait=wait_random_exponential(min=1, max=5), stop=stop_after_attempt(2))
def get_earnings_transcript(quarter: str, ticker: str, year: int):
    """Download one earnings-call transcript and normalize its date.

    Retries up to twice with randomized exponential backoff on failure.

    Args:
        quarter (str): fiscal quarter identifier, e.g. "Q1".
        ticker (str): stock ticker symbol.
        year (int): fiscal year of the call.

    Returns:
        dict: the first transcript record from the API, with its "date"
        field corrected via correct_date().
    """
    response = requests.get(
        f"https://discountingcashflows.com/api/transcript/{ticker}/{quarter}/{year}/",
        auth=("user", "pass"),
    )

    payload = json.loads(response.text)
    record = payload[0]
    record["date"] = correct_date(record["year"], record["date"])
    return record


class Raptor:
    """RAPTOR-style recursive embed / cluster / summarize helper.

    Pairs a chat model (used to write cluster summaries) with an embedding
    backend, and builds successively higher-level summaries over a set of
    text chunks (UMAP reduction + Gaussian-mixture clustering).
    """

    def __init__(self, model, embed):
        """
        Parameters:
        - model: LangChain-compatible chat model used for summarization.
        - embed: embeddings object exposing `embed_documents(texts)`.
        """
        self.model = model
        # Stored under the abbreviated attribute name `embd`, which the
        # rest of the class uses.
        self.embd = embed

    def global_cluster_embeddings(
        self,
        embeddings: np.ndarray,
        dim: int,
        n_neighbors: Optional[int] = None,
        metric: str = "cosine",
    ) -> np.ndarray:
        """
        Perform global dimensionality reduction on the embeddings using UMAP.

        Parameters:
        - embeddings: The input embeddings as a numpy array.
        - dim: The target dimensionality for the reduced space.
        - n_neighbors: Optional; the number of neighbors to consider for each point.
                    If not provided, it defaults to the square root of the number of embeddings.
        - metric: The distance metric to use for UMAP.

        Returns:
        - A numpy array of the embeddings reduced to the specified dimensionality.
        """
        if n_neighbors is None:
            n_neighbors = int((len(embeddings) - 1) ** 0.5)
        return umap.UMAP(
            n_neighbors=n_neighbors, n_components=dim, metric=metric
        ).fit_transform(embeddings)

    def local_cluster_embeddings(
        self,
        embeddings: np.ndarray,
        dim: int,
        num_neighbors: int = 10,
        metric: str = "cosine",
    ) -> np.ndarray:
        """
        Perform local dimensionality reduction on the embeddings using UMAP, typically after global clustering.

        Parameters:
        - embeddings: The input embeddings as a numpy array.
        - dim: The target dimensionality for the reduced space.
        - num_neighbors: The number of neighbors to consider for each point.
        - metric: The distance metric to use for UMAP.

        Returns:
        - A numpy array of the embeddings reduced to the specified dimensionality.
        """
        return umap.UMAP(
            n_neighbors=num_neighbors, n_components=dim, metric=metric
        ).fit_transform(embeddings)

    def get_optimal_clusters(
        self,
        embeddings: np.ndarray,
        max_clusters: int = 50,
        random_state: int = RANDOM_SEED,
    ) -> int:
        """
        Determine the optimal number of clusters using the Bayesian Information Criterion (BIC) with a Gaussian Mixture Model.

        Parameters:
        - embeddings: The input embeddings as a numpy array.
        - max_clusters: The maximum number of clusters to consider.
        - random_state: Seed for reproducibility.

        Returns:
        - An integer representing the optimal number of clusters found.
        """
        max_clusters = min(max_clusters, len(embeddings))
        n_clusters = np.arange(1, max_clusters)
        bics = []
        # Fit a GMM for each candidate cluster count and keep the BIC score;
        # the lowest BIC wins.
        for n in n_clusters:
            gm = GaussianMixture(n_components=n, random_state=random_state)
            gm.fit(embeddings)
            bics.append(gm.bic(embeddings))
        return n_clusters[np.argmin(bics)]

    def GMM_cluster(
        self, embeddings: np.ndarray, threshold: float, random_state: int = 0
    ):
        """
        Cluster embeddings using a Gaussian Mixture Model (GMM) based on a probability threshold.

        An embedding may belong to several clusters (soft assignment): it is
        assigned every cluster whose posterior probability exceeds `threshold`.

        Parameters:
        - embeddings: The input embeddings as a numpy array.
        - threshold: The probability threshold for assigning an embedding to a cluster.
        - random_state: Seed for reproducibility.
            NOTE(review): defaults to 0 here while get_optimal_clusters
            defaults to RANDOM_SEED — confirm whether both should share
            the module seed.

        Returns:
        - A tuple containing the cluster labels and the number of clusters determined.
        """
        n_clusters = self.get_optimal_clusters(embeddings)
        gm = GaussianMixture(n_components=n_clusters, random_state=random_state)
        gm.fit(embeddings)
        probs = gm.predict_proba(embeddings)
        labels = [np.where(prob > threshold)[0] for prob in probs]
        return labels, n_clusters

    def perform_clustering(
        self,
        embeddings: np.ndarray,
        dim: int,
        threshold: float,
    ) -> List[np.ndarray]:
        """
        Perform clustering on the embeddings by first reducing their dimensionality globally, then clustering
        using a Gaussian Mixture Model, and finally performing local clustering within each global cluster.

        Parameters:
        - embeddings: The input embeddings as a numpy array.
        - dim: The target dimensionality for UMAP reduction.
        - threshold: The probability threshold for assigning an embedding to a cluster in GMM.

        Returns:
        - A list of numpy arrays, where each array contains the cluster IDs for each embedding.
        """
        if len(embeddings) <= dim + 1:
            # Avoid clustering when there's insufficient data
            return [np.array([0]) for _ in range(len(embeddings))]

        # Global dimensionality reduction
        reduced_embeddings_global = self.global_cluster_embeddings(embeddings, dim)
        # Global clustering
        global_clusters, n_global_clusters = self.GMM_cluster(
            reduced_embeddings_global, threshold
        )

        all_local_clusters = [np.array([]) for _ in range(len(embeddings))]
        total_clusters = 0

        # Iterate through each global cluster to perform local clustering
        for i in range(n_global_clusters):
            # Extract embeddings belonging to the current global cluster
            global_cluster_embeddings_ = embeddings[
                np.array([i in gc for gc in global_clusters])
            ]

            if len(global_cluster_embeddings_) == 0:
                continue
            if len(global_cluster_embeddings_) <= dim + 1:
                # Handle small clusters with direct assignment
                local_clusters = [np.array([0]) for _ in global_cluster_embeddings_]
                n_local_clusters = 1
            else:
                # Local dimensionality reduction and clustering
                reduced_embeddings_local = self.local_cluster_embeddings(
                    global_cluster_embeddings_, dim
                )
                local_clusters, n_local_clusters = self.GMM_cluster(
                    reduced_embeddings_local, threshold
                )

            # Assign local cluster IDs, adjusting for total clusters already processed
            for j in range(n_local_clusters):
                local_cluster_embeddings_ = global_cluster_embeddings_[
                    np.array([j in lc for lc in local_clusters])
                ]
                # Map rows back to their original indices by exact-value
                # equality. NOTE(review): duplicate embedding rows would all
                # match and receive every duplicate's cluster IDs — assumed
                # not to occur in practice.
                indices = np.where(
                    (embeddings == local_cluster_embeddings_[:, None]).all(-1)
                )[1]
                for idx in indices:
                    all_local_clusters[idx] = np.append(
                        all_local_clusters[idx], j + total_clusters
                    )

            total_clusters += n_local_clusters

        return all_local_clusters

    ### --- Our code below --- ###

    def embed(self, texts):
        """
        Generate embeddings for a list of text documents.

        This function assumes the existence of an `embd` object with a method `embed_documents`
        that takes a list of texts and returns their embeddings.

        Parameters:
        - texts: List[str], a list of text documents to be embedded.

        Returns:
        - numpy.ndarray: An array of embeddings for the given text documents.
        """
        text_embeddings = self.embd.embed_documents(texts)
        text_embeddings_np = np.array(text_embeddings)
        return text_embeddings_np

    def embed_cluster_texts(self, texts):
        """
        Embeds a list of texts and clusters them, returning a DataFrame with texts, their embeddings, and cluster labels.

        This function combines embedding generation and clustering into a single step. It assumes the existence
        of a previously defined `perform_clustering` function that performs clustering on the embeddings.

        Parameters:
        - texts: List[str], a list of text documents to be processed.

        Returns:
        - pandas.DataFrame: A DataFrame containing the original texts, their embeddings, and the assigned cluster labels.
        """
        text_embeddings_np = self.embed(texts)  # Generate embeddings
        cluster_labels = self.perform_clustering(
            text_embeddings_np, 10, 0.1
        )  # Perform clustering on the embeddings
        df = pd.DataFrame()  # Initialize a DataFrame to store the results
        df["text"] = texts  # Store original texts
        df["embd"] = list(
            text_embeddings_np
        )  # Store embeddings as a list in the DataFrame
        df["cluster"] = cluster_labels  # Store cluster labels
        return df

    def fmt_txt(self, df: pd.DataFrame) -> str:
        """
        Formats the text documents in a DataFrame into a single string.

        Parameters:
        - df: DataFrame containing the 'text' column with text documents to format.

        Returns:
        - A single string where all text documents are joined by a specific delimiter.
        """
        unique_txt = df["text"].tolist()
        return "--- --- \n --- --- ".join(unique_txt)

    def embed_cluster_summarize_texts(
        self, texts: List[str], level: int
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """
        Embeds, clusters, and summarizes a list of texts. This function first generates embeddings for the texts,
        clusters them based on similarity, expands the cluster assignments for easier processing, and then summarizes
        the content within each cluster.

        Parameters:
        - texts: A list of text documents to be processed.
        - level: An integer parameter that could define the depth or detail of processing.

        Returns:
        - Tuple containing two DataFrames:
        1. The first DataFrame (`df_clusters`) includes the original texts, their embeddings, and cluster assignments.
        2. The second DataFrame (`df_summary`) contains summaries for each cluster, the specified level of detail,
            and the cluster identifiers.
        """

        # Embed and cluster the texts, resulting in a DataFrame with 'text', 'embd', and 'cluster' columns
        df_clusters = self.embed_cluster_texts(texts)

        # Prepare to expand the DataFrame for easier manipulation of clusters
        expanded_list = []

        # Expand DataFrame entries to document-cluster pairings for straightforward processing
        for index, row in df_clusters.iterrows():
            for cluster in row["cluster"]:
                expanded_list.append(
                    {"text": row["text"], "embd": row["embd"], "cluster": cluster}
                )

        # Create a new DataFrame from the expanded list
        expanded_df = pd.DataFrame(expanded_list)

        # Retrieve unique cluster identifiers for processing
        all_clusters = expanded_df["cluster"].unique()

        print(f"--Generated {len(all_clusters)} clusters--")

        # Summarization prompt.
        # NOTE(review): this prompt text describes LangChain Expression
        # Language documentation (it appears to come from a RAPTOR example),
        # not the financial documents this module processes — confirm it is
        # intentional before changing, as editing it alters LLM output.
        template = """Here is a sub-set of LangChain Expression Langauge doc.

        LangChain Expression Langauge provides a way to compose chain in LangChain.

        Give a detailed summary of the documentation provided.

        Documentation:
        {context}
        """
        prompt = ChatPromptTemplate.from_template(template)
        chain = prompt | self.model | StrOutputParser()

        # Format text within each cluster for summarization
        summaries = []
        for i in all_clusters:
            df_cluster = expanded_df[expanded_df["cluster"] == i]
            formatted_txt = self.fmt_txt(df_cluster)
            summaries.append(chain.invoke({"context": formatted_txt}))

        # Create a DataFrame to store summaries with their corresponding cluster and level
        df_summary = pd.DataFrame(
            {
                "summaries": summaries,
                "level": [level] * len(summaries),
                "cluster": list(all_clusters),
            }
        )

        return df_clusters, df_summary

    def recursive_embed_cluster_summarize(
        self, texts: List[str], level: int = 1, n_levels: int = 3
    ) -> Dict[int, Tuple[pd.DataFrame, pd.DataFrame]]:
        """
        Recursively embeds, clusters, and summarizes texts up to a specified level or until
        the number of unique clusters becomes 1, storing the results at each level.

        Parameters:
        - texts: List[str], texts to be processed.
        - level: int, current recursion level (starts at 1).
        - n_levels: int, maximum depth of recursion.

        Returns:
        - Dict[int, Tuple[pd.DataFrame, pd.DataFrame]], a dictionary where keys are the recursion
        levels and values are tuples containing the clusters DataFrame and summaries DataFrame at that level.
        """
        results = {}  # Dictionary to store results at each level

        # Perform embedding, clustering, and summarization for the current level
        df_clusters, df_summary = self.embed_cluster_summarize_texts(texts, level)

        # Store the results of the current level
        results[level] = (df_clusters, df_summary)

        # Determine if further recursion is possible and meaningful
        unique_clusters = df_summary["cluster"].nunique()
        if level < n_levels and unique_clusters > 1:
            # Use summaries as the input texts for the next level of recursion
            new_texts = df_summary["summaries"].tolist()
            next_level_results = self.recursive_embed_cluster_summarize(
                new_texts, level + 1, n_levels
            )

            # Merge the results from the next level into the current results dictionary
            results.update(next_level_results)

        return results

    def text_spliter(self, text, chunk_size_tok=2000, level=1, n_levels=3):
        """
        Split raw text into token-sized chunks, then recursively embed,
        cluster, and summarize them, returning chunks plus all summaries.

        NOTE: the method name keeps its original (misspelled) public
        spelling `text_spliter` for backward compatibility with callers.

        Parameters:
        - text: str, text to be processed.
        - chunk_size_tok: int, size of each chunk in tokens.
        - level: int, current recursion level (starts at 1).
        - n_levels: int, maximum depth of recursion.
        Returns:
        - List[str], all texts after recursive embedding, clustering, and summarization.
        """
        # BUG FIX: RecursiveCharacterTextSplitter was referenced without any
        # import in scope (the module-level import on top of the file is
        # commented out), raising NameError at runtime. Import it locally
        # from the `langchain` package this file already depends on.
        from langchain.text_splitter import RecursiveCharacterTextSplitter

        if text is None:
            raise ValueError("Text cannot be None.")

        text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
            chunk_size=chunk_size_tok, chunk_overlap=0
        )
        texts_split = text_splitter.split_text(text)
        if texts_split is None or len(texts_split) == 0:
            raise ValueError("Text splitting did not produce any text chunks.")

        results = self.recursive_embed_cluster_summarize(
            texts_split, level=level, n_levels=n_levels
        )
        if results is None:
            raise ValueError(
                "Recursive embedding and clustering did not produce any results."
            )

        all_texts = texts_split.copy()

        # Renamed loop variable (was `level`, shadowing the parameter).
        for lvl in sorted(results.keys()):
            # Extract summaries from the current level's DataFrame
            if results[lvl] is None or len(results[lvl]) != 2:
                raise ValueError(f"Unexpected results format at level {lvl}.")
            summaries = results[lvl][1]["summaries"].tolist()
            if summaries is None or len(summaries) == 0:
                raise ValueError(f"Level {lvl} did not produce any summaries.")
            # Extend all_texts with the summaries from the current level
            all_texts.extend(summaries)

        return all_texts


class ReportAnalysis:
    def __init__(self, ticker_symbol, fyear="latest"):
        """Set up the analysis context for one company's 10-K filing.

        Args:
            ticker_symbol (str): stock ticker, e.g. "AAPL".
            fyear (int | str): fiscal year of the 10-K to analyze, or the
                string "latest" for the most recent filing.

        Raises:
            ValueError: if fyear is neither an int nor "latest".
        """
        if not (
            isinstance(fyear, int) or (isinstance(fyear, str) and fyear == "latest")
        ):
            raise ValueError(
                "fyear must be an integer representing year (e.g. 2023) or 'latest'"
            )

        self.ticker_symbol = ticker_symbol
        self.stock = yf.Ticker(ticker_symbol)
        self.info = self.stock.info
        # NOTE: get_sec_report_address() also sets self.filing_date as a
        # side effect; the strftime call below relies on that ordering.
        self.report_address = self.get_sec_report_address(fyear)

        # Per-filing working directories: projects/<ticker>/<filing date>/.
        formatted_date = self.filing_date.strftime("%Y-%m-%d")
        self.project_dir = f"projects/{ticker_symbol}/{formatted_date}"
        self.cache_dir = f"projects/{ticker_symbol}/{formatted_date}/cache"
        os.makedirs(self.project_dir, exist_ok=True)
        os.makedirs(self.cache_dir, exist_ok=True)
        self.extractor = ExtractorApi(sec_api_key)

        # System prompt for downstream LLM question answering.
        self.system_prompt = """
            Role: Expert Investor
            Department: Finance
            Primary Responsibility: Generation of Customized Financial Analysis Reports

            Role Description:
            As an Expert Investor within the finance domain, your expertise is harnessed to develop bespoke Financial Analysis Reports that cater to specific client requirements. This role demands a deep dive into financial statements and market data to unearth insights regarding a company's financial performance and stability. Engaging directly with clients to gather essential information and continuously refining the report with their feedback ensures the final product precisely meets their needs and expectations.

            Key Objectives:

            Analytical Precision: Employ meticulous analytical prowess to interpret financial data, identifying underlying trends and anomalies.
            Effective Communication: Simplify and effectively convey complex financial narratives, making them accessible and actionable to non-specialist audiences.
            Client Focus: Dynamically tailor reports in response to client feedback, ensuring the final analysis aligns with their strategic objectives.
            Adherence to Excellence: Maintain the highest standards of quality and integrity in report generation, following established benchmarks for analytical rigor.
            Performance Indicators:
            The efficacy of the Financial Analysis Report is measured by its utility in providing clear, actionable insights. This encompasses aiding corporate decision-making, pinpointing areas for operational enhancement, and offering a lucid evaluation of the company's financial health. Success is ultimately reflected in the report's contribution to informed investment decisions and strategic planning.
        """

    def get_target_price(self):
        """Fetch the analyst price-target range published around the filing date.

        Queries the FMP price-target endpoint and keeps only estimates
        published exactly one day after the filing date.

        Returns:
            str: "min - max (md. median)" summary, or "Not Given" when no
            usable estimate is available or the request failed.
        """
        url = f"https://financialmodelingprep.com/api/v4/price-target?symbol={self.ticker_symbol}&apikey={fmp_api_key}"

        price_target = "Not Given"
        response = requests.get(url)

        if response.status_code == 200:
            data = response.json()
            est = []

            for tprice in data:
                # publishedDate is ISO-like ("YYYY-MM-DDTHH:MM:SS..."); keep
                # the date part only.
                tdate = tprice["publishedDate"].split("T")[0]
                tdate = datetime.strptime(tdate, "%Y-%m-%d")
                # Keep only targets published the day after the filing.
                if (tdate - self.filing_date).days == 1:
                    est.append(tprice["priceTarget"])

            # BUG FIX: np.min/np.max/np.median raise ValueError on an empty
            # list; fall back to the "Not Given" default when nothing matched.
            if est:
                price_target = f"{np.min(est)} - {np.max(est)} (md. {np.median(est)})"

        else:
            print("Failed to retrieve data:", response.status_code)

        return price_target

    def get_stock_performance(self):
        """Plot one year of percentage price change vs the S&P 500.

        The window ends at the filing date. The chart is saved into the
        project directory.

        Returns:
            str: path of the saved PNG.

        NOTE(review): relies on a module-level `plt` (matplotlib.pyplot),
        which is not imported at the top of this file — confirm the import
        exists before calling.
        """
        def fetch_stock_data(ticker):
            # One year of daily closes ending at the filing date.
            stock = yf.Ticker(ticker)
            start = (self.filing_date - timedelta(days=365)).strftime("%Y-%m-%d")
            end = self.filing_date.strftime("%Y-%m-%d")
            historical_data = stock.history(start=start, end=end)
            # hist = stock.history(period=period)
            return historical_data["Close"]

        target_close = fetch_stock_data(self.ticker_symbol)
        sp500_close = fetch_stock_data("^GSPC")

        # Percentage change relative to the first close in the window.
        company_change = (
            (target_close - target_close.iloc[0]) / target_close.iloc[0] * 100
        )
        sp500_change = (sp500_close - sp500_close.iloc[0]) / sp500_close.iloc[0] * 100

        # Tick positions at roughly 4-month intervals across the window.
        start_date = company_change.index.min()
        four_months = start_date + DateOffset(months=4)
        eight_months = start_date + DateOffset(months=8)
        end_date = company_change.index.max()

        # Prepare the plot.
        plt.rcParams.update({"font.size": 20})  # use a larger font size
        plt.figure(figsize=(14, 7))
        plt.plot(
            company_change.index,
            company_change,
            label=f'{self.info["shortName"]} Change %',
            color="blue",
        )
        plt.plot(
            sp500_change.index, sp500_change, label="S&P 500 Change %", color="red"
        )

        # Title and axis labels.
        plt.title(f'{self.info["shortName"]} vs S&P 500 - Change % Over the Past Year')
        plt.xlabel("Date")
        plt.ylabel("Change %")

        # X-axis tick labels at the chosen positions.
        plt.xticks(
            [start_date, four_months, eight_months, end_date],
            [
                start_date.strftime("%Y-%m"),
                four_months.strftime("%Y-%m"),
                eight_months.strftime("%Y-%m"),
                end_date.strftime("%Y-%m"),
            ],
        )

        plt.legend()
        plt.grid(True)
        plt.tight_layout()
        # plt.show()
        plot_path = f"{self.project_dir}/stock_performance.png"
        plt.savefig(plot_path)
        plt.close()
        return plot_path

    def get_pe_eps_performance(self):
        """Plot the PE ratio and diluted EPS over recent fiscal years.

        PE is computed as the close price on each EPS report date divided by
        that period's diluted EPS. The chart is saved into the project
        directory.

        Returns:
            str: path of the saved PNG.

        NOTE(review): the price window is ~5 years (1826 days) while the
        chart title says "Past 4 Years" — confirm which is intended. Also
        relies on a module-level `plt` (matplotlib.pyplot) import that is
        not present at the top of this file.
        """
        ss = self.get_income_stmt()
        eps = ss.loc["Diluted EPS", :]

        # Daily price history for roughly the past 5 years (1826 days).
        # historical_data = self.stock.history(period="5y")
        start = (self.filing_date - timedelta(days=1826)).strftime("%Y-%m-%d")
        end = self.filing_date.strftime("%Y-%m-%d")
        historical_data = self.stock.history(start=start, end=end)

        # EPS report dates, oldest first, normalized to UTC to match the
        # tz-aware price index.
        dates = pd.to_datetime(eps.index[::-1], utc=True)

        # Close price on each report date (or nearest prior trading day).
        results = {}
        for date in dates:
            # asof() falls back to the most recent row at or before `date`
            # when the exact date is not a trading day.
            if date not in historical_data.index:
                close_price = historical_data.asof(date)
            else:
                close_price = historical_data.loc[date]

            results[date] = close_price["Close"]

        # PE per report date; `results` preserves insertion order, which
        # matches the reversed EPS order zipped below.
        pe = [p / e for p, e in zip(results.values(), eps.values[::-1])]
        dates = eps.index[::-1]
        eps = eps.values[::-1]

        # Figure with two y-axes sharing the x-axis.
        fig, ax1 = plt.subplots(figsize=(14, 7))
        plt.rcParams.update({"font.size": 20})  # use a larger font size

        # PE ratio on the left axis.
        color = "tab:blue"
        ax1.set_xlabel("Date")
        ax1.set_ylabel("PE Ratio", color=color)
        ax1.plot(dates, pe, color=color)
        ax1.tick_params(axis="y", labelcolor=color)
        ax1.grid(True)

        # EPS on the right axis, sharing the x-axis with ax1.
        ax2 = ax1.twinx()
        color = "tab:red"
        ax2.set_ylabel("EPS", color=color)  # label for the right y-axis
        ax2.plot(dates, eps, color=color)
        ax2.tick_params(axis="y", labelcolor=color)

        # Title and x-tick rotation.
        plt.title(f'{self.info["shortName"]} PE Ratios and EPS Over the Past 4 Years')
        plt.xticks(rotation=45)

        # X-axis tick labels as YYYY-MM.
        plt.xticks(dates, [d.strftime("%Y-%m") for d in dates])

        plt.tight_layout()
        # plt.show()
        plot_path = f"{self.project_dir}/pe_performance.png"
        plt.savefig(plot_path)
        plt.close()
        return plot_path

    def get_sec_report_address(self, fyear):
        """Look up the EDGAR link of the company's 10-K filing via FMP.

        Side effect: sets ``self.filing_date`` to the filing's date.

        Args:
            fyear (int | str): target fiscal year, or "latest".

        Returns:
            str | None: URL of the filing document, or None when the HTTP
            request itself failed.

        Raises:
            ValueError: when the response contains no 10-K matching `fyear`.
        """
        url = f"https://financialmodelingprep.com/api/v3/sec_filings/{self.ticker_symbol}?type=10-k&page=0&apikey={fmp_api_key}"

        filing_address = None
        filing_date = None
        response = requests.get(url)

        if response.status_code == 200:
            data = response.json()
            if fyear == "latest":
                filing_address = data[0]["finalLink"]
                filing_date = data[0]["fillingDate"]
            else:
                for filing in data:
                    # NOTE(review): this branch reads "filingDate" while the
                    # "latest" branch reads "fillingDate" — confirm against
                    # the actual FMP response schema.
                    # BUG FIX: fyear may be an int (see __init__) while the
                    # split year is a string — compare as strings so integer
                    # years can match at all.
                    if filing["filingDate"].split("-")[0] == str(fyear):
                        filing_address = filing["finalLink"]
                        filing_date = filing["filingDate"]
                        break

            # BUG FIX: an unmatched year previously fell through to strptime
            # with `filing_date` unbound, raising UnboundLocalError.
            if filing_date is None:
                raise ValueError(f"No 10-K filing found for fyear={fyear!r}")
            self.filing_date = datetime.strptime(filing_date, "%Y-%m-%d %H:%M:%S")
        else:
            print("Failed to retrieve data:", response.status_code)

        return filing_address

    def get_next_weekday(sellf, date):
        # date = datetime.strptime(date_str, "%Y-%m-%d")

        if date.weekday() >= 5:
            days_to_add = 7 - date.weekday()
            next_weekday = date + timedelta(days=days_to_add)
            return next_weekday
        else:
            return date

    def get_historical_market_cap(self):
        """Fetch the company's market capitalization on the filing date.

        The date is shifted to the next weekday when it falls on a weekend,
        since market-cap data only exists for trading days.

        Returns:
            int | float | None: market cap in the listing currency, or None
            when the request failed or returned no data.
        """
        date = self.get_next_weekday(self.filing_date).strftime("%Y-%m-%d")
        url = f"https://financialmodelingprep.com/api/v3/historical-market-capitalization/{self.ticker_symbol}?limit=100&from={date}&to={date}&apikey={fmp_api_key}"

        mkt_cap = None
        response = requests.get(url)

        if response.status_code == 200:
            data = response.json()
            # BUG FIX: data[0] raised IndexError when the API returned an
            # empty list; keep the None default instead.
            if data:
                mkt_cap = data[0]["marketCap"]
            else:
                print("No market-cap data returned for", date)
        else:
            print("Failed to retrieve data:", response.status_code)

        return mkt_cap

    def get_historical_bvps(self, target_date):
        """Look up the book value per share (BVPS) closest to a given date.

        Args:
            target_date (datetime): date to match against FMP key-metrics
                entries.

        Returns:
            float | str: the BVPS value, or a human-readable message string
            when no data is available. Callers that format the result
            numerically must handle the string case.
        """
        # Fetch historical key financial metrics from the FMP API.
        url = f"https://financialmodelingprep.com/api/v3/key-metrics/{self.ticker_symbol}?limit=40&apikey={fmp_api_key}"
        response = requests.get(url)
        data = response.json()

        if not data:
            return "No data available"

        # Find the entry whose date is closest to the target date.
        closest_data = None
        min_date_diff = float("inf")
        for entry in data:
            date_of_data = datetime.strptime(entry["date"], "%Y-%m-%d")
            date_diff = abs(target_date - date_of_data).days
            if date_diff < min_date_diff:
                min_date_diff = date_diff
                closest_data = entry

        if closest_data:
            return closest_data.get("bookValuePerShare", "No BVPS data available")
        else:
            return "No close date data found"

    def get_key_data(self):
        """Assemble headline figures for the report's key-data table.

        Uses 52 weeks of trading history ending at the filing date, plus
        analyst rating, target price, market cap, and BVPS.

        Returns:
            dict: label -> formatted value; labels embed the listing currency.
        """
        # 52 weeks of daily history up to the filing date.
        start = (self.filing_date - timedelta(weeks=52)).strftime("%Y-%m-%d")
        end = self.filing_date.strftime("%Y-%m-%d")

        hist = self.stock.history(start=start, end=end)

        info = self.info
        close_price = hist["Close"].iloc[-1]

        # Average daily volume over the most recent 6 months of the window.
        six_months_start = (self.filing_date - timedelta(weeks=26)).strftime("%Y-%m-%d")
        hist_last_6_months = hist[
            (hist.index >= six_months_start) & (hist.index <= end)
        ]

        avg_daily_volume_6m = (
            hist_last_6_months["Volume"].mean()
            if not hist_last_6_months["Volume"].empty
            else 0
        )

        # BUG FIX: the 52-week bounds were swapped — the low was taken as
        # the minimum of the High column and the high as the maximum of the
        # Low column.
        fiftyTwoWeekLow = hist["Low"].min()
        fiftyTwoWeekHigh = hist["High"].max()

        # BUG FIX: these were called on an undefined global `ra` instead of
        # this instance.
        rating, _ = self.get_analyst_recommendations()
        target_price = self.get_target_price()

        # get_historical_market_cap()/get_historical_bvps() may return None /
        # a message string on API failure; format defensively instead of
        # crashing in "{:.2f}".format.
        mkt_cap = self.get_historical_market_cap()
        mkt_cap_str = "{:.2f}".format(mkt_cap / 1e6) if mkt_cap is not None else "N/A"
        bvps = self.get_historical_bvps(self.filing_date)
        bvps_str = "{:.2f}".format(bvps) if isinstance(bvps, (int, float)) else str(bvps)

        result = {
            "Rating": rating,
            "Target Price": target_price,
            f"6m avg daily vol ({info['currency']}mn)": "{:.2f}".format(
                avg_daily_volume_6m / 1e6
            ),
            f"Closing Price ({info['currency']})": "{:.2f}".format(close_price),
            f"Market Cap ({info['currency']}mn)": mkt_cap_str,
            f"52 Week Price Range ({info['currency']})": "{:.2f} - {:.2f}".format(
                fiftyTwoWeekLow, fiftyTwoWeekHigh
            ),
            f"BVPS ({info['currency']})": bvps_str,
        }
        return result

    def get_company_info(self):
        """Return basic company identity fields from the yfinance profile."""
        labels_to_keys = {
            "Company Name": "shortName",
            "Industry": "industry",
            "Sector": "sector",
            "Country": "country",
            "Website": "website",
        }
        profile = self.info
        return {label: profile[key] for label, key in labels_to_keys.items()}

    def get_income_stmt(self):
        income_stmt = self.stock.financials
        return income_stmt

    def get_balance_sheet(self):
        balance_sheet = self.stock.balance_sheet
        return balance_sheet

    def get_cash_flow(self):
        cash_flow = self.stock.cashflow
        return cash_flow

    def get_financial_metrics(self, years=4):
        """Build a year-by-column table of headline financial metrics.

        Combines the FMP income-statement, ratios, and key-metrics endpoints.

        Args:
            years (int): number of most recent fiscal years to include.

        Returns:
            pandas.DataFrame: metrics as rows, fiscal years (YYYY) as sorted
            columns, rounded to two decimals.
        """
        # Base URL setup for FMP API
        base_url = "https://financialmodelingprep.com/api/v3"
        df = pd.DataFrame()

        # BUG FIX: the three HTTP requests used to sit inside the per-year
        # loop even though the URLs do not depend on the year, multiplying
        # the API calls by `years`. Fetch each endpoint exactly once.
        income_statement_url = f"{base_url}/income-statement/{self.ticker_symbol}?limit={years}&apikey={fmp_api_key}"
        ratios_url = f"{base_url}/ratios/{self.ticker_symbol}?limit={years}&apikey={fmp_api_key}"
        key_metrics_url = f"{base_url}/key-metrics/{self.ticker_symbol}?limit={years}&apikey={fmp_api_key}"

        income_data = requests.get(income_statement_url).json()
        key_metrics_data = requests.get(key_metrics_url).json()
        ratios_data = requests.get(ratios_url).json()

        if income_data and key_metrics_data and ratios_data:
            # Guard against the API returning fewer years than requested
            # (previously an IndexError).
            available = min(
                years, len(income_data), len(key_metrics_data), len(ratios_data)
            )
            for year_offset in range(available):
                metrics = {
                    "Operating Revenue": income_data[year_offset]["revenue"] / 1e6,
                    "Adjusted Net Profit": income_data[year_offset]["netIncome"] / 1e6,
                    "Adjusted EPS": income_data[year_offset]["eps"],
                    "EBIT Margin": ratios_data[year_offset]["ebitPerRevenue"],
                    "ROE": key_metrics_data[year_offset]["roe"],
                    "PE Ratio": ratios_data[year_offset]["priceEarningsRatio"],
                    "EV/EBITDA": key_metrics_data[year_offset][
                        "enterpriseValueOverEBITDA"
                    ],
                    "PB Ratio": key_metrics_data[year_offset]["pbRatio"],
                }
                # Extract YYYY from the ISO date to label the column.
                year = income_data[year_offset]["date"][:4]
                df[year] = pd.Series(metrics)

        df = df.sort_index(axis=1)
        df = df.round(2)
        return df

    def get_risk_assessment(self):
        """Summarise the top 3 company risks from 10-K item 1A (Risk Factors).

        The LLM answer is cached on disk under the project directory, so
        repeat calls are free while USE_CACHE is enabled.
        """
        cache_answer = f"{self.project_dir}/risk_assessment.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as fh:
                return fh.read()

        company_name = self.info["shortName"]
        risk_factors = self.get_10k_section("1A")
        section_text = (
            f"Company Name: {company_name}\n\nRisk factors:{risk_factors}\n\n"
        )
        question = "According to the given information, summarise the top 3 key risks of the company. Less than 100 words."
        answer = self.ask_question(question, section_text, use_rag=False)
        with open(cache_answer, "w") as fh:
            fh.write(answer)
        return answer

    def get_analyst_recommendations(self):
        recommendations = self.stock.recommendations
        row_0 = recommendations.iloc[0, 1:]  # Exclude 'period' column

        # Find the maximum voting result
        max_votes = row_0.max()
        majority_voting_result = row_0[row_0 == max_votes].index.tolist()

        return majority_voting_result[0], max_votes

    def get_earnings(self, quarter, year):
        """Fetch the earnings-call transcript for this ticker's quarter/year."""
        return get_earnings_transcript(quarter, self.ticker_symbol, year)

    def get_10k_section(self, section):
        """
        Get one section of the 10-K report from SEC EDGAR.

        Args:
            section (int | str): 10-K item identifier, e.g. 7, "7" or "7A".
                Numeric items may now be passed as int or str interchangeably.

        Returns:
            str: the section's plain text, cached on disk after first fetch.

        Raises:
            ValueError: if `section` is not a valid 10-K item.
        """
        # Normalise to str BEFORE validating: the old code compared the raw
        # value against a mixed int/str list, so 7 (int) was accepted while
        # the equivalent "7" (str) was rejected.
        section = str(section)
        valid_sections = {
            "1", "1A", "1B", "2", "3", "4", "5", "6", "7", "7A", "8",
            "9", "9A", "9B", "10", "11", "12", "13", "14", "15",
        }
        if section not in valid_sections:
            raise ValueError(
                "Section must be in [1, 1A, 1B, 2, 3, 4, 5, 6, 7, 7A, 8, 9, 9A, 9B, 10, 11, 12, 13, 14, 15]"
            )

        os.makedirs(f"{self.project_dir}/10k", exist_ok=True)

        report_name = f"{self.project_dir}/10k/section_{section}.txt"

        if USE_CACHE and os.path.exists(report_name):
            with open(report_name, "r") as f:
                section_text = f.read()
        else:
            # Pull the section text via sec_api's ExtractorApi and cache it.
            section_text = self.extractor.get_section(
                self.report_address, section, "text"
            )

            with open(report_name, "w") as f:
                f.write(section_text)

        return section_text

    def get_10k_rag(self, section):
        """Build a RAG chain (retriever -> prompt -> model -> parser) over one
        10-K section.

        The Chroma vectorstore is persisted under the cache directory and
        reloaded on later calls while USE_CACHE is enabled.
        """
        vector_dir = f"{self.cache_dir}/section_{section}_vectorstore"
        if USE_CACHE and os.path.exists(vector_dir):
            # Reload the persisted store; .get() forces the collection to load.
            vectorstore = Chroma(persist_directory=vector_dir, embedding_function=embd)
            vectorstore.get()
        else:
            section_text = self.get_10k_section(section)
            chunks = rag_helper.text_spliter(
                section_text, chunk_size_tok=2000, level=1, n_levels=3
            )
            vectorstore = Chroma.from_texts(
                texts=chunks, embedding=embd, persist_directory=vector_dir
            )
            vectorstore.persist()

        retriever = vectorstore.as_retriever()

        # Standard RAG prompt from the LangChain hub.
        prompt = hub.pull("rlm/rag-prompt")

        rag_chain = (
            {"context": retriever, "question": RunnablePassthrough()}
            | prompt
            | model
            | StrOutputParser()
        )
        return rag_chain

    def analyze_business_highlights(self):
        """Describe per-business performance highlights using 10-K items 1 and 7.

        The LLM answer is cached on disk; repeat calls return the cached text
        while USE_CACHE is enabled.
        """
        cache_answer = f"{self.project_dir}/business_highlights_analysis.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as fh:
                return fh.read()

        business_summary = self.get_10k_section(1)
        section_7 = self.get_10k_section(7)
        section_text = (
            f"Business summary:{business_summary}\n\n"
            "Management's Discussion and Analysis of Financial Condition and Results of Operations :"
            f"{section_7}"
        )
        question = "According to the given information, describe the performance highlights per business of the company. Each business description should contain one sentence of a summarization and one sentence of explaination. Less than 130 words."
        answer = self.ask_question(question, section_text, use_rag=False)
        with open(cache_answer, "w") as fh:
            fh.write(answer)
        return answer

    def analyze_company_description(self):
        """Produce a short company/industry description from 10-K items 1 and 7.

        Two-step prompting: a detailed analysis first, then a <=130-word
        summary of that analysis. The final summary is cached on disk.
        """
        cache_answer = f"{self.project_dir}/company_description_analysis.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as fh:
                return fh.read()

        company_name = self.info["shortName"]
        business_summary = self.get_10k_section(1)
        section_7 = self.get_10k_section(7)
        section_text = (
            f"Company Name: {company_name}\n\n"
            f"Business summary:{business_summary}\n\n"
            "Management's Discussion and Analysis of Financial Condition and Results of Operations :"
            f"{section_7}"
        )
        question = "According to the given information, \n 1. briefly describe the company’s industry, \n 2. highlighting core strengths and competitive advantages key products or services, \n3. Identify current industry trends, opportunities, and challenges that influence the company’s strategy.\
                4. Outline recent strategic initiatives such as product launches, acquisitions, or new partnerships, and describe the company's response to market conditions. Less than 400 words."
        step_answer = self.ask_question(question, section_text, use_rag=False)
        question2 = "summarise the analysis, less than 130 words"
        answer = self.ask_question(question=question2, resource=step_answer)
        with open(cache_answer, "w") as fh:
            fh.write(answer)
        return answer

    def income_summarization(self):
        """Fuse the income-statement and segment analyses into one paragraph.

        Both sub-analyses are computed (or loaded from their own caches)
        first; the combined summary itself is cached on disk as well.
        """
        income_stmt_analysis = self.analyze_income_stmt()
        segment_analysis = self.analyze_segment_stmt()

        cache_answer = f"{self.project_dir}/income_summarization.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as fh:
                return fh.read()

        question = f"Income statement analysis: {income_stmt_analysis}, \
            Segment analysis: {segment_analysis}, \
            Synthesize the findings from the in-depth income statement analysis and segment analysis into a single, coherent paragraph. It should be fact-based and data-driven. First, present and assess overall revenue and profit situation, noting significant trends and changes. Second, examine the performance of the various business segments, with an emphasis on their revenue and profit changes, revenue contributions and market dynamics. For information not covered in the first two areas, identify and integrate key findings related to operation, potential risks and strategic opportunities for growth and stability into the analysis. For each part, integrate historical data comparisons and provide relevant facts, metrics or data as evidence. The entire synthesis should be presented as a continuous paragraph without the use of bullet points. Use subtitles and numbering for each key point. The total output should be less than 160 words."
        answer = self.ask_question(question, self.get_10k_section(7), use_rag=False)
        with open(cache_answer, "w") as fh:
            fh.write(answer)
        return answer

    def analyze_segment_stmt(self):
        """Analyse the company's business segments via the LLM, with caching.

        Feeds the income statement table plus 10-K item 7 (MD&A) to the model
        and caches the answer on disk.
        """
        cache_answer = f"{self.project_dir}/business_segment_analysis.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as fh:
                return fh.read()

        income_stmt = self.get_income_stmt()
        df_string = f"Income statement(Segment Analysis):{income_stmt.to_string().strip()}"

        question = "Identify the company's business segments and create a segment analysis using the Management's Discussion and Analysis and the income statement, subdivided by segment with clear headings. Address revenue and net profit with specific data, and calculate the changes. Detail strategic partnerships and their impacts, including details like the companies or organizations. Describe product innovations and their effects on income growth. Quantify market share and its changes, or state market position and its changes. Analyze market dynamics and profit challenges, noting any effects from national policy changes. Include the cost side, detailing operational costs, innovation investments, and expenses from channel expansion, etc. Support each statement with evidence, keeping each segment analysis concise and under 60 words, accurately sourcing information. For each segment, consolidate the most significant findings into one clear, concise paragraph, excluding less critical or vaguely described aspects to ensure clarity and reliance on evidence-backed information. For each segment, the output should be one single paragraph within 150 words."
        answer = self.ask_question(
            question, self.get_10k_section(7), df_string, use_rag=False
        )
        with open(cache_answer, "w") as fh:
            fh.write(answer)
        return answer

    def analyze_income_stmt(self):
        """Analyse the latest income statement via the LLM, with disk caching."""
        cache_answer = f"{self.project_dir}/income_stmt_analysis.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as fh:
                return fh.read()

        income_stmt = self.get_income_stmt()
        df_string = f"Income statement:{income_stmt.to_string().strip()}"

        question = "Conduct a comprehensive analysis of the company's income statement for the current fiscal year. Start with an overall revenue record, including Year-over-Year or Quarter-over-Quarter comparisons, and break down revenue sources to identify primary contributors and trends. Examine the Cost of Goods Sold for potential cost control issues. Review profit margins such as gross, operating, and net profit margins to evaluate cost efficiency, operational effectiveness, and overall profitability. Analyze Earnings Per Share to understand investor perspectives. Compare these metrics with historical data and industry or competitor benchmarks to identify growth patterns, profitability trends, and operational challenges. The output should be a strategic overview of the company’s financial health in a single paragraph, less than 130 words, summarizing the previous analysis into 4-5 key points under respective subheadings with specific discussion and strong data support."

        answer = self.ask_question(
            question, self.get_10k_section(7), df_string, use_rag=False
        )
        with open(cache_answer, "w") as fh:
            fh.write(answer)
        return answer

    def analyze_balance_sheet(self):
        """Analyse the latest balance sheet via the LLM, with disk caching."""
        cache_answer = f"{self.project_dir}/balance_sheet_analysis.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as fh:
                return fh.read()

        balance_sheet = self.get_balance_sheet()
        df_string = f"Balance sheet:{balance_sheet.to_string().strip()}"

        question = "Delve into a detailed scrutiny of the company's balance sheet for the most recent fiscal year, pinpointing the structure of assets, liabilities, and shareholders' equity to decode the firm's financial stability and operational efficiency. Focus on evaluating the liquidity through current assets versus current liabilities, the solvency via long-term debt ratios, and the equity position to gauge long-term investment potential. Contrast these metrics with previous years' data to highlight financial trends, improvements, or deteriorations. Finalize with a strategic assessment of the company's financial leverage, asset management, and capital structure, providing insights into its fiscal health and future prospects in a single paragraph. Less than 130 words."

        answer = self.ask_question(
            question, self.get_10k_section(7), df_string, use_rag=False
        )
        with open(cache_answer, "w") as fh:
            fh.write(answer)
        return answer

    def analyze_cash_flow(self):
        """Analyse the latest cash-flow statement via the LLM, with disk caching.

        Returns:
            str: the model's cash-flow analysis (<= 130 words per the prompt).
        """
        cache_answer = f"{self.project_dir}/cash_flow_analysis.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as f:
                answer = f.read()
        else:
            cash_flow = self.get_cash_flow()
            # Fix: the table label previously read "Balance sheet:" (copy-paste
            # from analyze_balance_sheet), mislabelling the data for the LLM.
            df_string = "Cash flow statement:" + cash_flow.to_string().strip()

            question = "Dive into a comprehensive evaluation of the company's cash flow for the latest fiscal year, focusing on cash inflows and outflows across operating, investing, and financing activities. Examine the operational cash flow to assess the core business profitability, scrutinize investing activities for insights into capital expenditures and investments, and review financing activities to understand debt, equity movements, and dividend policies. Compare these cash movements to prior periods to discern trends, sustainability, and liquidity risks. Conclude with an informed analysis of the company's cash management effectiveness, liquidity position, and potential for future growth or financial challenges in a single paragraph. Less than 130 words."

            section_text = self.get_10k_section(7)
            answer = self.ask_question(question, section_text, df_string, use_rag=False)
            with open(cache_answer, "w") as f:
                f.write(answer)
        return answer

    def financial_summarization(self):
        """Run the three statement analyses and fuse them into one summary.

        Returns:
            dict: the three individual analyses plus the combined text under
                "Financial Summary". The combined text is cached on disk.
        """
        income_stmt_analysis = self.analyze_income_stmt()
        balance_sheet_analysis = self.analyze_balance_sheet()
        cash_flow_analysis = self.analyze_cash_flow()

        cache_answer = f"{self.project_dir}/financial_summarization.txt"
        if USE_CACHE and os.path.exists(cache_answer):
            with open(cache_answer, "r") as fh:
                answer = fh.read()
        else:
            question = f"Income statement analysis: {income_stmt_analysis}, \
            Balance sheet analysis: {balance_sheet_analysis}, \
            Cash flow analysis: {cash_flow_analysis}, \
            Synthesize the findings from the in-depth analysis of the income statement, balance sheet, and cash flow for the latest fiscal year. Highlight the core insights regarding the company's operational performance, financial stability, and cash management efficiency. Discuss the interrelations between revenue growth, cost management strategies, and their impact on profitability as revealed by the income statement. Incorporate the balance sheet's insights on financial structure, liquidity, and solvency to provide a comprehensive view of the company's financial health. Merge these with the cash flow analysis to illustrate the company's liquidity position, investment activities, and financing strategies. Conclude with a holistic assessment of the company's fiscal health, identifying strengths, potential risks, and strategic opportunities for growth and stability. Offer recommendations to address identified challenges and capitalize on the opportunities to enhance shareholder value in a single paragraph. Less than 150 words."
            answer = self.ask_question(
                question, self.get_10k_section(7), use_rag=False
            )
            with open(cache_answer, "w") as fh:
                fh.write(answer)

        return {
            "Income Statement Analysis": income_stmt_analysis,
            "Balance Sheet Analysis": balance_sheet_analysis,
            "Cash Flow Analysis": cash_flow_analysis,
            "Financial Summary": answer,
        }

    def ask_question(self, question, resource, table_str=None, use_rag=False):
        """Ask the LLM a question grounded in the supplied resource text.

        Args:
            question: the instruction/question for the model.
            resource: source text the answer should be based on.
            table_str: optional tabular data prepended to the prompt.
            use_rag: the RAG path is currently stubbed out and simply returns
                a placeholder string.

        Returns:
            str: the model's answer, or the RAG placeholder.
        """
        if use_rag:
            # RAG pipeline is disabled for now; return the placeholder.
            return "RAG in progress"

        # Send the request to the OpenAI API using the configured model.
        if table_str:
            prompt = f"{self.system_prompt}\n\n{table_str}\n\nResource: {resource}\n\nQuestion: {question}"
        else:
            prompt = f"{self.system_prompt}\n\nResource: {resource}\n\nQuestion: {question}"

        completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt.strip()}],
            model=llm,
            temperature=0,
            max_tokens=500,
        )
        return completion.choices[0].message.content
