import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import SpectralClustering, KMeans
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
from sklearn.metrics import silhouette_score, calinski_harabasz_score
from scipy.sparse import hstack
import matplotlib.pyplot as plt
from utils.getPublicData import getjobData
import os


def silhouette_method(features, max_clusters=8):
    """
    Pick the number of clusters via the silhouette coefficient, using the same
    SpectralClustering configuration as the production clustering step.

    Side effect: saves a three-panel diagnostic figure to
    'silhouette_method_result.png' in the working directory.

    Parameters:
    features (array-like): Feature matrix to cluster (dense or sparse).
    max_clusters (int): Upper bound on the number of clusters to try.

    Returns:
    int: Best cluster count found, or 1 when no valid clustering exists.
    """
    # Degenerate input: a single sample (or none) cannot be clustered.
    if features.shape[0] <= 1:
        return 1

    # Densify sparse input so plotting and boolean indexing work below.
    if not isinstance(features, np.ndarray):
        features = features.toarray()

    # Shared parameter set, kept identical to the formal clustering step.
    base_params = dict(
        n_clusters=2,  # placeholder; overridden per candidate k
        affinity='cosine',
        random_state=42,
        n_init=10,
        eigen_solver='arpack',
    )

    # Project to 2D for visualization (same reduction used elsewhere).
    reducer = PCA(n_components=2) if features.shape[1] > 2 else None
    projected = reducer.fit_transform(features) if reducer else features.copy()

    plt.figure(figsize=(15, 5))

    # Panel 1: raw data distribution after PCA.
    plt.subplot(1, 3, 1)
    plt.scatter(projected[:, 0], projected[:, 1], alpha=0.6)
    plt.title("PCA Data Distribution")

    # Evaluate every candidate k with the exact production configuration.
    ks = []
    scores = []
    upper = min(max_clusters, features.shape[0] - 1)
    for k in range(2, upper + 1):
        try:
            labels = SpectralClustering(**dict(base_params, n_clusters=k)).fit_predict(features)
            # Silhouette is undefined when clustering collapses to one group.
            if len(np.unique(labels)) < 2:
                continue
            ks.append(k)
            scores.append(silhouette_score(features, labels, metric='cosine'))
        except Exception as e:
            print(f"k={k} failed: {str(e)}")
            continue

    # Panel 2: silhouette curve with the winning k marked.
    plt.subplot(1, 3, 2)
    best_k = ks[np.argmax(scores)] if scores else 1
    if scores:
        plt.plot(ks, scores, 'bo-')
        plt.axvline(best_k, color='r', linestyle='--')
        plt.title(f"Silhouette Score (Best k={best_k})")
    else:
        plt.text(0.5, 0.5, "No valid scores", ha='center')

    # Panel 3: preview of the clustering at the chosen k.
    plt.subplot(1, 3, 3)
    if scores:
        preview_labels = SpectralClustering(**dict(base_params, n_clusters=best_k)).fit_predict(features)
        for label in np.unique(preview_labels):
            mask = preview_labels == label
            plt.scatter(projected[mask, 0], projected[mask, 1], label=f'Cluster {label}')
        plt.title("Cluster Preview")
        plt.legend()

    plt.tight_layout()
    plt.savefig('silhouette_method_result.png')
    plt.close()

    return best_k if scores else 1


def elbow_method(features, max_clusters=10):
    """
    Elbow method for KMeans to determine the optimal number of clusters.

    Side effect: saves the elbow curve to 'elbow_method_result.png' in the
    working directory.

    Parameters:
    features (array-like): Feature data to be clustered.
    max_clusters (int): Maximum number of clusters, default is 10.

    Returns:
    int: Optimal number of clusters (clamped to the range [2, max_clusters]).
    """
    # If there are 1 or fewer data points, return 1
    if features.shape[0] <= 1:
        return 1

    # Cap the search so KMeans is never asked for more clusters than samples
    # (consistent with silhouette_method). Without this, every k > n_samples
    # raises, and the np.inf placeholders appended below corrupt the
    # elbow-distance computation (inf/NaN distances).
    max_clusters = min(max_clusters, features.shape[0])

    scores = []
    for k in range(1, max_clusters + 1):
        try:
            # Fixed seed + n_init for reproducible inertia values.
            model = KMeans(n_clusters=k, random_state=42, n_init=10)
            model.fit(features)
            scores.append(model.inertia_)
        except Exception as e:
            print(f"Error at k={k}: {e}")
            scores.append(np.inf)

    # Calculate point of maximum curvature (improved elbow detection)
    x = np.arange(1, len(scores) + 1)
    y = np.array(scores, dtype=float)

    def compute_elbow_point(x, y):
        """
        Return the k whose (k, inertia) point lies farthest from the straight
        line joining the first and last points of the curve.

        Parameters:
        x (array-like): x-axis data (candidate k values).
        y (array-like): y-axis data (inertia per k).

        Returns:
        int: The k value corresponding to the elbow point.
        """
        n_points = len(y)
        if n_points < 3:
            return 1
        # Coordinates of start and end points
        x1, y1 = x[0], y[0]
        x2, y2 = x[-1], y[-1]
        # Line equation coefficients: Ax + By + C = 0
        A = y2 - y1
        B = x1 - x2
        C = x2 * y1 - x1 * y2
        # Vectorized perpendicular distance of every point to that line.
        distances = np.abs(A * x + B * y + C) / np.sqrt(A ** 2 + B ** 2)
        return int(np.argmax(distances)) + 1  # Convert index to k value

    elbow = compute_elbow_point(x, y)
    elbow = max(2, min(elbow, max_clusters))  # Ensure k stays in [2, max_clusters]

    # Plot elbow graph
    plt.figure(figsize=(10, 6))
    plt.plot(x, y, 'bo-', label='Inertia')
    plt.axvline(elbow, color='r', linestyle='--', label=f'Elbow: k={elbow}')
    plt.title("Elbow Method for KMeans Clustering")
    plt.xlabel("Number of Clusters")
    plt.ylabel("Inertia")
    plt.legend()
    plt.savefig('elbow_method_result.png')
    plt.close()

    return int(elbow)


class ContentBasedRecall:
    """
    Content-based recall: every job is scored against the user's collected
    jobs by TF-IDF cosine similarity over "title + work tags" text.
    """

    def __init__(self):
        """Load job records from the database and build the TF-IDF index."""
        self.job_data = pd.DataFrame(list(getjobData()))
        self.vectorizer = TfidfVectorizer()
        self._prepare_data()

    def _prepare_data(self):
        """Fill missing fields, normalize salary dtypes, vectorize job text."""
        defaults = {
            'title': '',
            'workTag': '',
            'maxSalary': 0,
            'minSalary': 0,
            'educational': '学历不限',
            'workExperience': '经验不限'
        }
        self.job_data = self.job_data.fillna(defaults)
        # Salary columns must be numeric for later float() serialization.
        for column in ('maxSalary', 'minSalary'):
            self.job_data[column] = self.job_data[column].astype(float)
        # Title combined with work tags is the text signal for similarity.
        self.job_texts = self.job_data["title"] + " " + self.job_data["workTag"]
        self.job_features = self.vectorizer.fit_transform(self.job_texts)

    def get_recall(self, user_collect_data, top_n=100):
        """
        Recall jobs similar to the user's collected jobs.

        Parameters:
        user_collect_data (list): Collected job dicts, each carrying a 'jid'.
        top_n (int): Number of jobs to recall per collected job (default 100).

        Returns:
        list: Deduplicated recalled job dicts, in first-seen order.
        """
        candidates = []
        for record in user_collect_data:
            matches = self.job_data[self.job_data['id'] == record['jid']]
            if matches.empty:
                continue

            seed = matches.iloc[0]
            seed_vector = self.vectorizer.transform(
                [seed['title'] + " " + seed['workTag']]
            )
            sims = cosine_similarity(self.job_features, seed_vector).flatten()

            # top_n + 1 — presumably to compensate for the seed job itself
            # ranking first with similarity 1; confirm against callers.
            for idx in sims.argsort()[::-1][:top_n + 1]:
                row = self.job_data.iloc[idx]
                candidates.append({
                    "jid": int(row['id']),
                    "title": row['title'],
                    "workTag": row['workTag'],
                    "maxSalary": float(row['maxSalary']),
                    "minSalary": float(row['minSalary']),
                    "educational": row['educational'],
                    "workExperience": row['workExperience']
                })

        # Deduplicate while preserving first-seen order.
        seen = set()
        unique_results = []
        for candidate in candidates:
            key = (candidate['jid'], candidate['title'])
            if key not in seen:
                seen.add(key)
                unique_results.append(candidate)

        return unique_results


class SpectralReranker:
    """
    Reranker based on spectral clustering.

    Recalled jobs are clustered; for each job the user collected, the jobs
    sharing its cluster are ranked by cosine similarity to it, and the
    per-seed rankings are interleaved round-robin into a final top-12 list.
    Diagnostic plots are written to `self.plot_dir`.
    """

    def __init__(self):
        """Load job data from the database and set up plotting output."""
        job_data_list = list(getjobData())
        self.job_data = pd.DataFrame(job_data_list)
        # Ordinal encoding of education levels (larger = higher requirement;
        # '学历不限' / "no requirement" sits mid-scale as the neutral default).
        self.edu_mapping = {
            '初中及以下': 1, '中专及以下': 2, '中专/中技': 3, '高中': 4,
            '学历不限': 5, '大专': 6, '本科': 7, '硕士': 8, '博士': 9
        }
        # Save directory for plots
        self.plot_dir = 'cluster_plots'
        if not os.path.exists(self.plot_dir):
            os.makedirs(self.plot_dir)

    def _convert_education(self, edu):
        """
        Convert an education label to its ordinal value.

        Parameters:
        edu (str): Education label.

        Returns:
        int: Ordinal value; unknown labels fall back to 5 ("no requirement").
        """
        return self.edu_mapping.get(edu, 5)

    def _convert_experience(self, exp):
        """
        Convert a work-experience label to an approximate number of years.

        Parameters:
        exp (str): Experience label, e.g. '经验不限', '3-5年', '6个月'.

        Returns:
        float: Approximate years of experience (0 when unlimited/unparseable).
        """
        exp = str(exp).strip()
        if exp == "经验不限":  # "no experience requirement"
            return 0
        if exp == "在校/应届":  # "student / fresh graduate"
            return 0.1
        if "个月" in exp:  # "N months" -> fraction of a year
            return float(exp.replace('个月', '').strip()) / 12
        if "年" in exp:  # "... years" variants
            exp = exp.replace('年', '').strip()
            if "-" in exp:
                # Range "a-b" years -> midpoint
                return sum(map(float, exp.split('-'))) / 2
            elif "以内" in exp:
                # "within N" years -> scaled down
                return float(exp.replace('以内', '')) * 0.6
            elif "以上" in exp:
                # "N or more" years -> scaled up
                return float(exp.replace('以上', '')) * 1.5
            try:
                # Plain "N年" (exact years) previously fell through to 0.
                return float(exp)
            except ValueError:
                return 0
        return 0

    def _prepare_features(self, data):
        """
        Build the clustering feature matrix for a set of jobs.

        Features are TF-IDF over '/'-separated work tags (top 50 terms)
        stacked with numeric columns [education ordinal, experience years,
        average salary], scaled without centering to preserve sparsity.

        Parameters:
        data (DataFrame): Job rows (workTag/salary/education/experience).

        Returns:
        scipy.sparse matrix: Scaled feature matrix, rows aligned with `data`.
        """
        data = data.copy()
        data['workTag'] = data['workTag'].fillna('').astype(str)
        data['avgSalary'] = (data['maxSalary'] + data['minSalary']) / 2
        data['educational'] = data['educational'].apply(self._convert_education)
        data['workExperience'] = data['workExperience'].apply(self._convert_experience)

        # Work tags are '/'-separated; generic tokens are treated as stop words.
        tfidf = TfidfVectorizer(
            tokenizer=lambda x: x.split('/'),
            max_features=50,
            stop_words=['经验', '开发', '相关']
        )
        worktag_tfidf = tfidf.fit_transform(data['workTag'])

        X = hstack([
            worktag_tfidf,
            data[['educational', 'workExperience', 'avgSalary']].values.astype(float)
        ])

        # with_mean=False keeps the stacked sparse matrix sparse.
        scaler = StandardScaler(with_mean=False)
        return scaler.fit_transform(X)

    def plot_initial_data_points(self, features):
        """
        Plot the distribution of initial data points after PCA.

        Parameters:
        features (array-like): Feature data.

        Returns:
        tuple: (2D-projected features, fitted PCA or the unfitted instance
        when the input already has <= 2 columns).
        """
        if features.shape[1] > 2:
            pca = PCA(n_components=2)
            features_2d = pca.fit_transform(features)
        else:
            features_2d = features
            pca = None

        plt.figure(figsize=(10, 8))
        plt.scatter(features_2d[:, 0], features_2d[:, 1], s=50, alpha=0.7)
        plt.title('Initial Data Distribution')
        plt.xlabel('Principal Component 1')
        plt.ylabel('Principal Component 2')
        plt.savefig(os.path.join(self.plot_dir, 'initial_data_distribution.png'))
        plt.close()

        # Return the 2D features and PCA for later centroid projection.
        return features_2d, pca

    def plot_clustered_data(self, features, spectral_labels=None, kmeans_labels=None, kmeans_centroids=None, spectral_centroids=None):
        """
        Plot clustered data points for Spectral and/or KMeans clustering.

        Writes 'spectral_clustering.png', 'kmeans_clustering.png' or
        'clustering_comparison.png' into `self.plot_dir` depending on which
        label sets are supplied.

        Parameters:
        features (array-like): 2D feature data after PCA.
        spectral_labels (array-like, optional): Spectral clustering labels.
        kmeans_labels (array-like, optional): KMeans clustering labels.
        kmeans_centroids (array-like, optional): KMeans cluster centroids (2D).
        spectral_centroids (array-like, optional): Spectral centroids (2D).
        """
        # Only KMeans labels provided: single KMeans plot.
        if spectral_labels is None and kmeans_labels is not None:
            plt.figure(figsize=(12, 10))
            self._plot_single_clustering(features, kmeans_labels, kmeans_centroids, "KMeans Clustering Result")
            plt.savefig(os.path.join(self.plot_dir, 'kmeans_clustering.png'))
            plt.close()
            return
        # Only spectral labels provided: single spectral plot.
        elif kmeans_labels is None and spectral_labels is not None:
            plt.figure(figsize=(12, 10))
            self._plot_single_clustering(features, spectral_labels, spectral_centroids, "Spectral Clustering Result")
            plt.savefig(os.path.join(self.plot_dir, 'spectral_clustering.png'))
            plt.close()
            return

        # Both provided: side-by-side comparison figure.
        if spectral_labels is not None and kmeans_labels is not None:
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))

            # Plot Spectral Clustering
            self._plot_clustering_on_axis(ax1, features, spectral_labels, spectral_centroids, "Spectral Clustering Result")

            # Plot KMeans Clustering
            self._plot_clustering_on_axis(ax2, features, kmeans_labels, kmeans_centroids, "KMeans Clustering Result")

            plt.tight_layout()
            plt.savefig(os.path.join(self.plot_dir, 'clustering_comparison.png'))
            plt.close()

    def _plot_single_clustering(self, features_2d, labels, centroids=None, title="Clustering Result"):
        """Plot one clustering result on the current pyplot figure."""
        cmap = plt.get_cmap('Set1')
        colors = [cmap(i) for i in np.unique(labels)]

        for i, label in enumerate(np.unique(labels)):
            indices = np.where(labels == label)[0]
            plt.scatter(
                features_2d[indices, 0],
                features_2d[indices, 1],
                c=[colors[i]],
                label=f'Cluster {label}',
                s=60,
                alpha=0.7
            )

        if centroids is not None:
            # Centroids drawn as large black crosses on top of the points.
            plt.scatter(
                centroids[:, 0],
                centroids[:, 1],
                c='black',
                marker='X',
                s=150,
                linewidths=2,
                label='Centroids'
            )

        plt.title(title)
        plt.xlabel('Principal Component 1')
        plt.ylabel('Principal Component 2')
        plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
        plt.grid(alpha=0.3)

    def _plot_clustering_on_axis(self, ax, features_2d, labels, centroids=None, title="Clustering Result"):
        """Plot one clustering result on a specific matplotlib axis."""
        cmap = plt.get_cmap('Set1')
        colors = [cmap(i) for i in np.unique(labels)]

        for i, label in enumerate(np.unique(labels)):
            indices = np.where(labels == label)[0]
            ax.scatter(
                features_2d[indices, 0],
                features_2d[indices, 1],
                c=[colors[i]],
                label=f'Cluster {label}',
                s=60,
                alpha=0.7
            )

        if centroids is not None:
            # Centroids drawn as large black crosses on top of the points.
            ax.scatter(
                centroids[:, 0],
                centroids[:, 1],
                c='black',
                marker='X',
                s=150,
                linewidths=2,
                label='Centroids'
            )

        ax.set_title(title)
        ax.set_xlabel('Principal Component 1')
        ax.set_ylabel('Principal Component 2')
        ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
        ax.grid(alpha=0.3)

    def rerank(self, recall_results, user_collect_data):
        """
        Rerank recalled jobs using spectral clustering.

        Parameters:
        recall_results (list): Recalled job dicts (each with a 'jid').
        user_collect_data (list): User's collected job dicts (each with 'jid').

        Returns:
        list: Up to 12 reranked job dicts drawn from recall_results.
        """
        # Jobs the user already interacted with are excluded from the output.
        user_behavior_jids = {item['jid'] for item in user_collect_data}

        recall_jids = [item['jid'] for item in recall_results]
        recall_data = self.job_data[self.job_data['id'].isin(recall_jids)].reset_index(drop=True)
        features = self._prepare_features(recall_data).toarray()

        # Plot the initial distribution; also yields the fitted PCA for
        # projecting centroids later.
        features_2d, pca = self.plot_initial_data_points(features)

        if features.shape[0] > 1:
            n_clusters = silhouette_method(features)
        else:
            n_clusters = 1

        cluster_model = SpectralClustering(
            n_clusters=n_clusters,
            affinity='cosine',
            random_state=42,
            n_init=10,
            eigen_solver='arpack'  # keep parameters identical to silhouette_method
        )
        cluster_labels = cluster_model.fit_predict(features)

        if features.shape[0] > 1:
            if len(np.unique(cluster_labels)) > 1:
                # Evaluate with the cosine metric so the reported score matches
                # the affinity used for clustering (and silhouette_method).
                silhouette = silhouette_score(features, cluster_labels, metric='cosine')
                ch_score = calinski_harabasz_score(features, cluster_labels)
                print(f"Spectral Clustering Metrics: Silhouette: {silhouette:.4f} | CH Score: {ch_score:.4f}")

            # Mean feature vector per non-empty cluster; iterating the labels
            # actually present avoids NaN rows for empty clusters.
            spectral_centroids = np.array([
                features[cluster_labels == label].mean(axis=0)
                for label in np.unique(cluster_labels)
            ])

            # Project centroids to 2D for visualization
            if features.shape[1] > 2:
                spectral_centroids_2d = pca.transform(spectral_centroids)
            else:
                spectral_centroids_2d = spectral_centroids.copy()

            # Store the spectral clustering data for combined visualization later
            self.spectral_labels = cluster_labels
            self.spectral_centroids_2d = spectral_centroids_2d

            # Plot only spectral clustering for now
            self.plot_clustered_data(features_2d, spectral_labels=cluster_labels, spectral_centroids=spectral_centroids_2d)

        # Map job ID -> index in recall_results (O(1) lookups below).
        recall_id_to_index = {item['jid']: idx for idx, item in enumerate(recall_results)}

        # Map job ID -> row index in the feature matrix.
        feature_id_to_index = {int(recall_data.iloc[i]['id']): i for i in range(len(recall_data))}

        sorted_sets = []
        for item in user_collect_data:
            jid = item['jid']
            if jid not in feature_id_to_index:
                continue

            user_feature_index = feature_id_to_index[jid]
            user_cluster = cluster_labels[user_feature_index]
            user_feature = features[user_feature_index]

            # Indices of the jobs sharing the seed job's cluster.
            cluster_indices = np.where(cluster_labels == user_cluster)[0]
            if len(cluster_indices) == 0:
                continue

            # Rank cluster members by cosine similarity to the seed job.
            cluster_features = features[cluster_indices]
            similarities = cosine_similarity(cluster_features, [user_feature])
            sorted_indices = similarities.flatten().argsort()[::-1]

            # Translate feature-row indices back to recall_results entries.
            sorted_set = []
            for idx in sorted_indices:
                feature_idx = cluster_indices[idx]
                job_id = int(recall_data.iloc[feature_idx]['id'])
                # Skip jobs the user already collected.
                if job_id in user_behavior_jids:
                    continue
                if job_id in recall_id_to_index:
                    recall_idx = recall_id_to_index[job_id]
                    sorted_set.append(recall_results[recall_idx])

            if sorted_set:
                sorted_sets.append(sorted_set)

        # Interleave the per-seed rankings round-robin until 12 results
        # are collected (duplicates across seeds are kept only once).
        final_results = []
        index = 0
        while len(final_results) < 12 and index < max([len(s) for s in sorted_sets] if sorted_sets else [0]):
            for sorted_set in sorted_sets:
                if index < len(sorted_set):
                    result = sorted_set[index]
                    if result not in final_results:
                        final_results.append(result)
                    if len(final_results) >= 12:
                        break
            index += 1

        # Store features_2d so a subsequent KMeans rerank can reuse it.
        self.features_2d = features_2d

        return final_results


class KMeansReranker(SpectralReranker):
    """
    KMeans variant of SpectralReranker: clusters the recalled jobs with
    KMeans and ranks within-cluster jobs by Euclidean distance to each
    collected job instead of cosine similarity.
    """

    def __init__(self):
        """
        KMeans clustering reranker initialization.
        """
        super().__init__()  # Inherit parent class initialization method

    def rerank(self, recall_results, user_collect_data):
        """
        Rerank using KMeans clustering.

        Parameters:
        recall_results (list): Recalled job results.
        user_collect_data (list): User's collected job data.

        Returns:
        list: Reranked job list (up to 12 entries from recall_results).
        """
        # Get the set of job IDs from user behavior data
        user_behavior_jids = {item['jid'] for item in user_collect_data}

        recall_jids = [item['jid'] for item in recall_results]
        recall_data = self.job_data[self.job_data['id'].isin(recall_jids)].reset_index(drop=True)
        features = self._prepare_features(recall_data).toarray()

        # If SpectralReranker has already been run, use the saved features_2d
        # NOTE(review): this reuse assumes the earlier spectral rerank on this
        # same instance processed the same recall set — confirm before relying
        # on it; a fresh KMeansReranker instance never takes this branch.
        if hasattr(self, 'features_2d'):
            features_2d = self.features_2d
            # Create a dummy PCA for consistent interface
            pca = PCA(n_components=2)
            if features.shape[1] > 2:
                # Fit PCA on the features to ensure consistent transformation
                # NOTE(review): when features.shape[1] <= 2 this PCA stays
                # unfitted; presumably safe since pca.transform is only called
                # below under the same shape[1] > 2 condition — verify.
                pca.fit(features)
        else:
            # Otherwise, compute them
            features_2d, pca = self.plot_initial_data_points(features)

        # Choose k with the elbow method (only meaningful for >1 sample).
        if features.shape[0] > 1:
            n_clusters = elbow_method(features)
        else:
            n_clusters = 1

        cluster_model = KMeans(
            n_clusters=n_clusters,
            init='k-means++',
            n_init=10,
            random_state=42
        )
        cluster_labels = cluster_model.fit_predict(features)

        if features.shape[0] > 1:
            if len(np.unique(cluster_labels)) > 1:
                # Internal validity metrics (silhouette_score uses its default
                # euclidean metric here, matching KMeans' distance).
                silhouette = silhouette_score(features, cluster_labels)
                ch_score = calinski_harabasz_score(features, cluster_labels)
                print(f"KMeans Clustering - Silhouette: {silhouette:.4f} | CH Score: {ch_score:.4f}")

            # Project centroids to 2D for visualization
            if features.shape[1] > 2:
                kmeans_centroids_2d = pca.transform(cluster_model.cluster_centers_)
            else:
                kmeans_centroids_2d = cluster_model.cluster_centers_.copy()

            # If spectral clustering has already been run, plot both together
            if hasattr(self, 'spectral_labels') and hasattr(self, 'spectral_centroids_2d'):
                self.plot_clustered_data(
                    features_2d, 
                    spectral_labels=self.spectral_labels, 
                    kmeans_labels=cluster_labels, 
                    kmeans_centroids=kmeans_centroids_2d,
                    spectral_centroids=self.spectral_centroids_2d
                )
            else:
                # Otherwise just plot KMeans
                self.plot_clustered_data(features_2d, kmeans_labels=cluster_labels, kmeans_centroids=kmeans_centroids_2d)

        # Create mapping from job ID to recall_results index
        recall_id_to_index = {item['jid']: idx for idx, item in enumerate(recall_results)}

        # Create mapping from job ID to feature index
        feature_id_to_index = {int(recall_data.iloc[i]['id']): i for i in range(len(recall_data))}

        sorted_sets = []
        for item in user_collect_data:
            jid = item['jid']
            if jid not in feature_id_to_index:
                continue

            user_feature_index = feature_id_to_index[jid]
            user_cluster = cluster_labels[user_feature_index]
            user_feature = features[user_feature_index]

            # Get indices of positions in the same cluster
            cluster_indices = np.where(cluster_labels == user_cluster)[0]
            if len(cluster_indices) == 0:
                continue

            # Calculate Euclidean distance and sort (smaller distance means more similar)
            cluster_features = features[cluster_indices]
            distances = euclidean_distances(cluster_features, [user_feature])
            sorted_indices = distances.flatten().argsort()  # Sort by distance from small to large

            # Convert feature indices to job IDs, then find corresponding recall_results indices
            sorted_set = []
            for idx in sorted_indices:
                feature_idx = cluster_indices[idx]
                job_id = int(recall_data.iloc[feature_idx]['id'])
                # Skip positions in user behavior data
                if job_id in user_behavior_jids:
                    continue
                if job_id in recall_id_to_index:
                    recall_idx = recall_id_to_index[job_id]
                    sorted_set.append(recall_results[recall_idx])

            if sorted_set:
                sorted_sets.append(sorted_set)

        # Round-robin merge of the per-seed rankings, keeping each job once,
        # until 12 results are collected or every ranking is exhausted.
        final_results = []
        index = 0
        while len(final_results) < 12 and index < max([len(s) for s in sorted_sets] if sorted_sets else [0]):
            for sorted_set in sorted_sets:
                if index < len(sorted_set):
                    result = sorted_set[index]
                    if result not in final_results:
                        final_results.append(result)
                    if len(final_results) >= 12:
                        break
            index += 1

        return final_results


if __name__ == "__main__":
    # Smoke test: a small sample of "collected job" records.
    user_collect_data = [
        {'jid': 3269},
        {'jid': 2025},
        {'jid': 49}
    ]

    # Stage 1: content-based recall.
    recall_system = ContentBasedRecall()
    recall_results = recall_system.get_recall(user_collect_data)
    print(f"Recalled {len(recall_results)} jobs")

    # Stage 2a: rerank with spectral clustering.
    spectral_reranker = SpectralReranker()
    spectral_results = spectral_reranker.rerank(recall_results, user_collect_data)
    print("\nSpectral Clustering Top 10:")
    for rank, job in enumerate(spectral_results[:10], 1):
        print(f"{rank}. {job} (JID: {job['jid']})")

    # Stage 2b: rerank with KMeans clustering.
    kmeans_reranker = KMeansReranker()
    kmeans_results = kmeans_reranker.rerank(recall_results, user_collect_data)
    print("\nKMeans Clustering Top 10:")
    for rank, job in enumerate(kmeans_results[:10], 1):
        print(f"{rank}. {job} (JID: {job['jid']})")