# encoding=utf8
import numpy as np
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csr_matrix, lil_matrix
from scipy.sparse.linalg import eigsh
from tqdm import tqdm
import os
import gc
import sys
import time

class LargeGraphProcessor:
    """Stream a large edge-list file into a sparse adjacency matrix and
    compute Laplacian-eigenmap node embeddings.

    The input file holds one whitespace-separated edge per line
    ("u v [ignored...]"); lines starting with '#' are comments.
    Processing runs in phases: node collection, streamed adjacency
    construction, blockwise normalized-Laplacian assembly, eigensolve,
    and chunked saving. Per-phase wall-clock times land in ``self.timings``.
    """

    def __init__(self, file_path, emb_dim=128):
        self.file_path = file_path  # path to the edge-list file
        self.emb_dim = emb_dim      # target embedding dimensionality
        self.node_dict = {}         # raw node label -> contiguous row index
        self.num_nodes = 0          # number of distinct nodes seen
        self.adj_matrix = None      # symmetric binary adjacency (sparse)
        self.timings = {}           # phase name -> elapsed seconds

    def _collect_nodes(self):
        """Phase 1: scan the file once and assign each node a dense index."""
        start_time = time.time()

        nodes = set()
        with open(self.file_path, 'r', encoding='utf-8') as f:
            for line in tqdm(f, desc="扫描文件", unit="行", mininterval=1):
                if not line.strip() or line.startswith('#'):
                    continue
                parts = line.split()
                # Skip malformed single-token lines instead of raising
                # ValueError (phase 2 already tolerated them).
                if len(parts) < 2:
                    continue
                nodes.update(parts[:2])

        self.num_nodes = len(nodes)
        # Two-level key: purely numeric labels sort numerically first,
        # everything else lexicographically after.  The original key mixed
        # int and str and raised TypeError on heterogeneous label sets.
        self.node_dict = {
            n: i
            for i, n in enumerate(
                sorted(nodes, key=lambda x: (0, int(x)) if x.isdigit() else (1, x))
            )
        }
        self.timings['phase1'] = time.time() - start_time

    def _stream_build_adjacency(self):
        """Phase 2: stream the edges and build a symmetric binary adjacency matrix."""
        start_time = time.time()
        # Accumulate directly in CSR: CSR + CSR addition per chunk is far
        # cheaper than the original per-chunk conversion to LIL.
        self.adj_matrix = csr_matrix((self.num_nodes, self.num_nodes), dtype=np.float32)

        chunk_size = 2_000_000   # buffered (row, col) pairs before a flush
        current_chunk = []
        edge_cache = set()       # per-chunk duplicate-edge filter

        with open(self.file_path, 'r', encoding='utf-8') as f:
            pbar = tqdm(desc="处理边", unit="行", mininterval=1)
            for line in f:
                pbar.update(1)
                if not line.strip() or line.startswith('#'):
                    continue

                parts = line.strip().split()
                if len(parts) < 2:
                    continue
                u, v = parts[0], parts[1]
                # frozenset makes (u, v) and (v, u) the same key.
                edge_key = frozenset((u, v))

                if edge_key in edge_cache:
                    continue
                edge_cache.add(edge_key)

                try:
                    i, j = self.node_dict[u], self.node_dict[v]
                    # Store both directions to keep the matrix symmetric.
                    current_chunk.append((i, j))
                    current_chunk.append((j, i))
                except KeyError:
                    # Node unseen in phase 1 (file changed between passes);
                    # deliberately best-effort.
                    pass

                if len(current_chunk) >= chunk_size:
                    self._process_chunk(current_chunk)
                    current_chunk = []
                    edge_cache.clear()
                    gc.collect()

            if current_chunk:
                self._process_chunk(current_chunk)
            pbar.close()  # original leaked the progress bar

        self.adj_matrix = self.adj_matrix.tocsr()
        self.adj_matrix.sum_duplicates()
        # Duplicate edges seen in different chunks (the cache is cleared at
        # every flush) and self-loops (appended twice above) would otherwise
        # accumulate weights > 1; clamp back to a binary adjacency.
        np.minimum(self.adj_matrix.data, 1.0, out=self.adj_matrix.data)
        self.timings['phase2'] = time.time() - start_time

    def _process_chunk(self, chunk):
        """Fold a buffered list of (row, col) index pairs into the adjacency matrix."""
        rows, cols = zip(*chunk)
        data = np.ones(len(rows), dtype=np.float32)
        coo = coo_matrix(
            (data, (rows, cols)),
            shape=(self.num_nodes, self.num_nodes)
        )
        # CSR + CSR addition; the original converted the whole accumulator
        # to LIL on every chunk, which is quadratic in practice.
        self.adj_matrix = self.adj_matrix + coo.tocsr()

    def _compute_laplacian(self):
        """Phase 3: blockwise symmetric normalized Laplacian L = I - D^{-1/2} A D^{-1/2}.

        Returns the Laplacian in CSR format.
        """
        start_time = time.time()

        block_size = 50_000
        degree = np.array(self.adj_matrix.sum(1)).flatten().astype(np.float32)
        # 1/sqrt(d) with isolated nodes mapped to 0.  The original used
        # np.reciprocal(..., where=...) with no `out=`, which leaves
        # uninitialized garbage in the masked-out slots.
        D_inv_sqrt = np.zeros_like(degree)
        nonzero = degree > 0
        D_inv_sqrt[nonzero] = 1.0 / np.sqrt(degree[nonzero])
        right_scale = sp.diags(D_inv_sqrt)

        normalized_rows = []
        for i in tqdm(range(0, self.num_nodes, block_size), desc="处理分块"):
            j = min(i + block_size, self.num_nodes)

            left_scale = sp.diags(D_inv_sqrt[i:j], format='csr')
            adj_block = self.adj_matrix[i:j, :].astype(np.float32)

            # Keep the FULL row block.  The original sliced [:, i:j] and
            # stored only diagonal blocks, silently discarding every edge
            # that crosses a block boundary.
            normalized_rows.append(left_scale.dot(adj_block).dot(right_scale).tocsr())

            del adj_block
            gc.collect()

        normalized = sp.vstack(normalized_rows, format='csr')
        L = (sp.eye(self.num_nodes, dtype=np.float32, format='csr') - normalized).tocsr()

        self.timings['phase3'] = time.time() - start_time

        return L

    def compute_embeddings(self):
        """Run the full pipeline and return a (num_nodes, emb_dim) float array."""
        total_start = time.time()

        self._collect_nodes()
        self._stream_build_adjacency()
        # NOTE(review): node_dict is dropped to save memory, so the mapping
        # from original labels back to row indices is lost before saving.
        del self.node_dict
        gc.collect()

        L = self._compute_laplacian()

        start_time = time.time()
        eig_params = {
            'k': min(self.emb_dim + 20, self.num_nodes - 2),  # slack for filtered eigenpairs
            # Laplacian eigenmaps use the SMALLEST eigenvalues.  The original
            # asked for 'LM' (largest), which also made the > 1e-8 filter
            # below a no-op since the top of the spectrum is far from zero.
            # For very large graphs consider shift-invert (sigma≈0) instead,
            # as 'SM' converges slowly.
            'which': 'SM',
            'tol': 1e-4,
            'maxiter': 200
        }
        eigenvalues, eigenvectors = eigsh(L, **eig_params)

        # Drop the (near-)zero trivial eigenvectors (one per connected component).
        valid_idx = eigenvalues > 1e-8
        embeddings = eigenvectors[:, valid_idx]

        if embeddings.shape[1] < self.emb_dim:
            # Pad with random directions orthogonalized against the spectral
            # ones so the output always has exactly emb_dim columns.
            missing = self.emb_dim - embeddings.shape[1]
            rand_vecs = np.random.randn(self.num_nodes, missing).astype(np.float32)

            rand_vecs -= embeddings @ (embeddings.T @ rand_vecs)
            Q, _ = np.linalg.qr(rand_vecs, mode='reduced')
            embeddings = np.hstack([embeddings, Q[:, :missing]])

        self.timings['phase4'] = time.time() - start_time
        self.timings['total_compute'] = time.time() - total_start

        return embeddings[:, :self.emb_dim]

    def save_embeddings(self, embeddings, output_path):
        """Write embeddings in word2vec text format: a "<count> <dim>" header,
        then one "<row_index> <v0> ... <vD>" line per node.

        NOTE(review): rows are keyed by internal index, not the original node
        label — node_dict is deleted during compute_embeddings.
        """
        start_time = time.time()

        chunk_size = 100_000  # rows formatted per writelines() call
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(f"{self.num_nodes} {self.emb_dim}\n")

            for start in tqdm(range(0, self.num_nodes, chunk_size), desc="分块写入"):
                end = min(start + chunk_size, self.num_nodes)
                chunk = embeddings[start:end]

                lines = [
                    f"{start + idx} " + " ".join(f"{x:.6f}" for x in row) + "\n"
                    for idx, row in enumerate(chunk)
                ]
                f.writelines(lines)  # one buffered write per chunk

        self.timings['save'] = time.time() - start_time

if __name__ == "__main__":

    graph_name = "Yelp"  # dataset directory/name under ../to_csv/

    input_file = f"../to_csv/{graph_name}/{graph_name}.ungraph"
    output_file = f"../to_csv/{graph_name}/{graph_name}_Lap.emb"

    try:
        total_start = time.time()
        processor = LargeGraphProcessor(input_file, emb_dim=128)
        embeddings = processor.compute_embeddings()
        processor.save_embeddings(embeddings, output_file)

    except Exception as e:
        # Print the full traceback and exit non-zero: the original swallowed
        # the stack trace and returned success (exit code 0) to the shell.
        import traceback
        traceback.print_exc()
        print(f"\n错误发生: {str(e)}")
        sys.exit(1)
