import numpy as np
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from scipy import linalg
from scipy.special import iv
from sklearn import preprocessing
from sklearn.utils.extmath import randomized_svd
from scipy.sparse import coo_matrix  
import argparse
import time
import os
import gc
from tqdm import tqdm

class UltraMemoryProNE:
    """Memory-frugal ProNE-style graph embedding.

    Pipeline: stream the edge list from disk to build a binary symmetric
    adjacency matrix, factorize a reweighted form of it with randomized SVD
    (``pre_factorization``), then enhance the base embeddings with
    Chebyshev-Gaussian spectral propagation
    (``chebyshev_gaussian_optimized``).
    """

    def __init__(self, graph_file, emb_file, dimension):
        """Read the graph and build the sparse adjacency matrix.

        Args:
            graph_file: path to a whitespace-separated edge list; node ids
                must be parseable as integers (indices are assigned in
                numeric order of the ids).
            emb_file: default output path for the embedding file.
            dimension: embedding dimensionality.
        """
        self.graph_file = graph_file
        self.emb_file = emb_file
        self.dimension = dimension
        self.node_dict = self._collect_nodes()   # raw id (str) -> dense row index
        self.num_nodes = len(self.node_dict)
        self.matrix = self._streaming_build()    # binary symmetric CSR adjacency
        gc.collect()  # drop construction temporaries before the heavy SVD work

    def _collect_nodes(self):
        """First pass over the edge file: map node ids to dense indices.

        Self-loop edges are ignored, so a node appearing only in self-loops
        is dropped entirely. Returns ``{node_id: index}`` with indices
        assigned in numeric order of the ids.
        """
        nodes = set()
        with open(self.graph_file, 'r') as f:
            for line in tqdm(f, desc="Reading edges", mininterval=5):
                if not line.strip():
                    continue  # skip blank lines
                u, v = line.strip().split()[:2]  # extra columns ignored
                if u != v:  # self-loops excluded
                    nodes.update([u, v])
        return {n: idx for idx, n in enumerate(sorted(nodes, key=int))}

    def _streaming_build(self):
        """Second pass: build the symmetric binary adjacency matrix in chunks.

        Edges are buffered in batches of ``chunk_size`` index pairs and merged
        into a CSR matrix so peak memory stays bounded regardless of file size.
        """
        chunk_size = 500_000
        matrix = csr_matrix((self.num_nodes, self.num_nodes), dtype=np.int8)
        edge_cache = set()  # per-chunk duplicate filter (canonical undirected keys)

        def flush_chunk(chunk):
            """Convert a list of (row, col) pairs into a CSR increment."""
            rows, cols = zip(*chunk)
            coo = coo_matrix(
                (np.ones(len(rows), dtype=np.int8), (rows, cols)),
                shape=(self.num_nodes, self.num_nodes)
            )
            return coo.tocsr()

        with open(self.graph_file, 'r') as f:
            chunk = []
            for line in tqdm(f, desc="Processing edges", mininterval=5):
                if not line.strip():
                    continue
                u, v = line.strip().split()[:2]
                if u == v:
                    continue  # self-loops excluded, matching _collect_nodes

                edge_key = (u, v) if u < v else (v, u)  # undirected dedup key
                if edge_key in edge_cache:
                    continue
                edge_cache.add(edge_key)

                # Both passes apply the same filters, so a KeyError should not
                # occur; stay defensive in case the file changes between passes.
                try:
                    i, j = self.node_dict[u], self.node_dict[v]
                    chunk.extend([(i, j), (j, i)])  # store both directions
                except KeyError:
                    pass

                if len(chunk) >= chunk_size:
                    matrix += flush_chunk(chunk)
                    # Clamp to {0, 1} after every merge: edge_cache is cleared
                    # per chunk, so an edge repeated across chunks would
                    # otherwise keep summing and could overflow int8.
                    matrix.data = np.ones_like(matrix.data)
                    chunk = []
                    edge_cache.clear()  # bound the dedup set's memory
                    gc.collect()

            if chunk:  # flush the final partial chunk
                matrix += flush_chunk(chunk)

        matrix.sum_duplicates()
        matrix.data = np.ones_like(matrix.data)  # binarize any remaining multiplicities
        return matrix

    def get_embedding_rand(self, matrix):
        """Randomized SVD factorization in float32 to limit memory.

        Returns the L2-normalized rows of ``U * sqrt(Sigma)``, shape
        ``(num_nodes, dimension)``.
        """
        matrix = matrix.astype(np.float32)
        U, Sigma, _ = randomized_svd(
            matrix,
            n_components=self.dimension,
            n_iter=3,          # few power iterations: speed over precision
            random_state=42    # deterministic output
        )
        U = U * np.sqrt(Sigma)  # scale singular vectors by sqrt of the spectrum
        return preprocessing.normalize(U, norm="l2")

    def pre_factorization(self):
        """Base embedding: factorize a negative-sampling-reweighted matrix.

        Builds ``F = C - C @ diag(w)`` where C is the row-normalized (l1)
        adjacency and w are unigram^0.75 negative-sampling weights
        (word2vec-style smoothing), then factorizes F with randomized SVD.
        """
        C = preprocessing.normalize(self.matrix, norm='l1')  # row-stochastic
        col_sum = np.array(C.sum(axis=0)).ravel()
        neg_weights = np.power(col_sum, 0.75)  # smoothed unigram distribution
        neg_weights /= neg_weights.sum()
        del col_sum
        F = C - C.dot(sp.diags(neg_weights))  # subtract column-scaled copy
        del C, neg_weights
        gc.collect()
        return self.get_embedding_rand(F)

    def chebyshev_gaussian_optimized(self, features, order=3, mu=0.2, s=0.5):
        """Chebyshev-Gaussian spectral propagation (ProNE enhancement step).

        Approximates a Gaussian spectral filter applied to *features* with a
        truncated Chebyshev expansion whose coefficients are modified Bessel
        functions ``iv(k, s)``, then propagates the filtered signal one hop:
        ``emb = (I + A) @ (features - conv)``.

        Args:
            features: (num_nodes, dimension) float array of base embeddings.
            order: number of Chebyshev terms k = 0..order-1 (order < 2 keeps
                only the T0 term).
            mu: eigenvalue-shift parameter of the filter.
            s: Gaussian scale (Bessel-function argument).
        Returns:
            L2-normalized propagated embeddings, same shape as *features*.
        """
        features = features.astype(np.float32)
        matrix = self.matrix.astype(np.float32)
        row_sum = np.asarray(matrix.sum(axis=1)).ravel()
        with np.errstate(divide='ignore'):  # isolated nodes produce inf here
            inv_row_sum = np.power(row_sum, -1.0)
        inv_row_sum[~np.isfinite(inv_row_sum)] = 0.0  # zero out isolated nodes
        D_inv = sp.diags(inv_row_sum)
        L = sp.eye(self.num_nodes) - D_inv.dot(matrix)  # random-walk Laplacian
        M = L - mu * sp.eye(self.num_nodes)             # shifted operator
        del D_inv, row_sum, inv_row_sum
        gc.collect()

        Lx0 = features               # T0 term
        conv = iv(0, s) * Lx0
        Lx1 = Lx0                    # placeholder; loop below needs a binding
        # Bug fix: the T1 term belongs to order >= 2 (terms k = 0..order-1);
        # the previous `order >= 1` guard added it even for a single-term
        # expansion.
        if order >= 2:
            # T1 term (ProNE's modified first-order step): 0.5*M^2 f - f.
            Lx1 = (0.5 * M.dot(M.dot(features)) - features).astype(np.float32)
            conv -= 2 * iv(1, s) * Lx1
            gc.collect()

        for k in range(2, order):
            # Chebyshev recurrence on the rolling pair (Lx0, Lx1).
            # Bug fix: the original kept Lx1 = M @ features (discarding the
            # computed T1 value) and always used `features` as Lx0, which
            # broke the recurrence for every iteration.
            Lx2 = (M.dot(M.dot(Lx1)) - 2 * Lx1 - Lx0).astype(np.float32)
            factor = 2 * iv(k, s)  # Bessel coefficient, alternating sign
            if k % 2 == 0:
                conv += factor * Lx2
            else:
                conv -= factor * Lx2
            Lx0, Lx1 = Lx1, Lx2
            gc.collect()

        diff = (features - conv).astype(np.float32)
        del features, conv, Lx0, Lx1
        gc.collect()

        # emb = (I + A) @ diff, computed in row blocks to bound peak memory.
        chunk_size = 10000
        emb = np.zeros_like(diff)
        for start in tqdm(range(0, self.num_nodes, chunk_size), desc="Matrix multiply"):
            stop = min(start + chunk_size, self.num_nodes)
            emb[start:stop] = diff[start:stop] + matrix[start:stop].dot(diff)

        return preprocessing.normalize(emb, norm="l2")

    def save_embeddings(self, emb, filename):
        """Write embeddings in word2vec text format, in chunks.

        First line: ``<num_nodes> <dimension>``; then one line per node,
        ``<node_id> <v1> ... <vd>`` with 6-decimal precision, in index order.
        """
        chunk_size = 100000
        with open(filename, 'w') as f:
            f.write(f"{self.num_nodes} {self.dimension}\n")
            id_map = {v: k for k, v in self.node_dict.items()}  # index -> raw id

            for start in tqdm(range(0, self.num_nodes, chunk_size), desc="Writing chunks"):
                block = emb[start:start + chunk_size]
                lines = []
                for offset in range(block.shape[0]):
                    node_id = id_map[start + offset]
                    lines.append(
                        f"{node_id} " + " ".join(f"{x:.6f}" for x in block[offset]) + "\n"
                    )
                f.writelines(lines)  # one batched write per chunk

def main():
    """CLI entry point: build the model, compute embeddings, save them."""
    args = parse_args()

    start_time = time.time()  # wall-clock timing for the whole run

    model = UltraMemoryProNE(args.graph, args.emb2, args.dimension)

    base_emb = model.pre_factorization()  # base embedding via randomized SVD
    # Bug fix: forward the parsed hyperparameters instead of hard-coding
    # them — `-order`, `-mu` and `-theta` were accepted by parse_args but
    # previously ignored here.
    enhanced_emb = model.chebyshev_gaussian_optimized(
        base_emb, order=args.order, mu=args.mu, s=args.theta
    )
    model.save_embeddings(enhanced_emb, args.emb2)

    total_time = time.time() - start_time
    print(f"\n生成嵌入文件 {args.emb2}的总时间为: {total_time:.2f} 秒")


def parse_args():
    """Parse command-line options for the embedding pipeline."""
    parser = argparse.ArgumentParser()
    # Flag table keeps the option set easy to scan and extend.
    option_specs = (
        ('-graph', dict(required=True, help="Input graph file path")),
        ('-emb2', dict(required=True, help="Output spectral embedding file path")),
        ('-dimension', dict(type=int, default=128, help="Embedding dimension")),
        ('-order', dict(type=int, default=3, help="Chebyshev polynomial order")),
        ('-mu', dict(type=float, default=0.2, help="Smoothing parameter")),
        ('-theta', dict(type=float, default=0.5, help="Spectral propagation parameter")),
    )
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()

# Run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()