import numpy as np
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csr_matrix
from tqdm import tqdm
import os
import gc
import time
import shutil

class RobustGCN:
    """Memory-conscious GCN embedding pipeline for large edge-list graphs.

    Pipeline order: ``_collect_nodes`` -> ``_build_sparse_adjacency`` ->
    ``_gcn_propagate`` -> ``save_embeddings``. Each phase records its
    wall-clock time in ``self.timings`` under keys 'phase1'..'phase4'.
    """

    def __init__(self, file_path, emb_dim=128, hidden_dim=256, 
                 num_layers=3, dropout=0.3, alpha=0.5):
        """
        Args:
            file_path: whitespace-separated edge list; lines starting with
                '#' and blank lines are ignored.
            emb_dim: dimensionality of the final node embeddings.
            hidden_dim: width of the intermediate GCN layers.
            num_layers: number of propagation layers.
            dropout: inverted-dropout rate applied between hidden layers.
            alpha: mixing weight between propagated signal and residual.
        """
        self.file_path = file_path
        self.emb_dim = emb_dim
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        self.alpha = alpha
        self.node_dict = {}    # raw node token (str) -> contiguous int index
        self.num_nodes = 0
        self.adj_matrix = None  # normalized adjacency (CSR), set in phase 2
        self.timings = {}       # phase name -> elapsed seconds

    def _collect_nodes(self):
        """Scan the edge file once and build a deterministic node index map."""
        start = time.time()
        nodes = set()
        with open(self.file_path, 'r') as f:
            for line in tqdm(f, desc="解析文件", unit="行"):
                if line.startswith('#') or not line.strip():
                    continue
                parts = line.split()
                # Skip malformed lines (fewer than two tokens) instead of
                # raising ValueError; phase 2 applies the same tolerance.
                if len(parts) < 2:
                    continue
                nodes.add(parts[0])
                nodes.add(parts[1])
        self.num_nodes = len(nodes)
        # Sorting makes index assignment reproducible across runs.
        self.node_dict = {n: i for i, n in enumerate(sorted(nodes))}
        self.timings['phase1'] = time.time() - start

    def _build_sparse_adjacency(self):
        """Build the symmetrized, self-looped, D^{-1/2} A D^{-1/2}-normalized
        adjacency matrix, spilling edge batches to disk to bound peak memory.
        """
        start = time.time()

        temp_dir = "./adj_temp"
        os.makedirs(temp_dir, exist_ok=True)

        batch_size = 1000000
        total_edges = 0
        current_batch = 0

        try:
            # Phase 2a: stream edges into compressed on-disk batches.
            with open(self.file_path, 'r') as f:
                while True:
                    rows = np.zeros(batch_size, dtype=np.int32)
                    cols = np.zeros(batch_size, dtype=np.int32)
                    count = 0

                    for _ in range(batch_size):
                        line = f.readline()
                        if not line:
                            break
                        if line.startswith('#') or not line.strip():
                            continue
                        parts = line.split()
                        # Explicit length check instead of a bare except:
                        # a bare except also swallows KeyboardInterrupt.
                        if len(parts) < 2:
                            continue
                        u, v = parts[0], parts[1]
                        if u in self.node_dict and v in self.node_dict:
                            rows[count] = self.node_dict[u]
                            cols[count] = self.node_dict[v]
                            count += 1

                    if count == 0:
                        break

                    np.savez_compressed(
                        os.path.join(temp_dir, f"batch_{current_batch}.npz"),
                        rows=rows[:count],
                        cols=cols[:count]
                    )
                    total_edges += count
                    current_batch += 1
                    del rows, cols
                    gc.collect()

            # Phase 2b: accumulate the batches. Sparse addition merges
            # duplicate coordinates, so no explicit sum_duplicates() pass
            # is needed.
            adj = csr_matrix((self.num_nodes, self.num_nodes), dtype=np.float32)
            for batch_file in tqdm(os.listdir(temp_dir), desc="处理批次"):
                # Context manager closes the NpzFile handle promptly.
                with np.load(os.path.join(temp_dir, batch_file)) as data:
                    batch_adj = coo_matrix(
                        (np.ones(len(data['rows']), dtype=np.float32),
                         (data['rows'], data['cols'])),
                        shape=(self.num_nodes, self.num_nodes)
                    )
                adj = adj + batch_adj
                del batch_adj
                gc.collect()

            # Symmetrize (undirected graph) and add self-loops. setdiag on
            # CSR raises SparseEfficiencyWarning, so route through LIL.
            adj = adj.maximum(adj.T).tolil()
            adj.setdiag(1.0)
            adj = adj.tocsr()

            # Symmetric normalization: D^{-1/2} A D^{-1/2}. The self-loops
            # above guarantee degree >= 1; epsilon is belt-and-braces.
            degree = np.array(adj.sum(1)).flatten().astype(np.float32)
            D_inv_sqrt = sp.diags(1.0 / np.sqrt(degree + 1e-8), dtype=np.float32)
            self.adj_matrix = D_inv_sqrt.dot(adj).dot(D_inv_sqrt).tocsr()

            del adj, D_inv_sqrt, degree
            gc.collect()
        finally:
            # Always remove scratch files, even when a phase above raised.
            shutil.rmtree(temp_dir, ignore_errors=True)

        self.timings['phase2'] = time.time() - start

    def _glorot_init(self, input_dim, output_dim):
        """Glorot/Xavier uniform init; float32 to halve memory footprint."""
        scale = np.sqrt(6.0 / (input_dim + output_dim))
        return np.random.uniform(-scale, scale, (input_dim, output_dim)).astype(np.float32)

    def _gcn_propagate(self):
        """Propagate random+structural features through untrained GCN layers.

        Returns:
            (num_nodes, emb_dim) float array of L2-normalized embeddings.
        """
        start = time.time()

        # Input features: 64 random dims + log-degree + isolated-node flag.
        degree = np.array(self.adj_matrix.sum(1)).flatten().astype(np.float32)
        features = np.hstack([
            np.random.normal(size=(self.num_nodes, 64)).astype(np.float32),
            np.log(degree + 1).reshape(-1, 1),
            (degree == 0).astype(np.float32).reshape(-1, 1)
        ])

        current_h = features
        residual_proj = None
        for layer in range(self.num_layers):
            output_dim = self.hidden_dim if layer < self.num_layers - 1 else self.emb_dim
            W = self._glorot_init(current_h.shape[1], output_dim)

            if sp.issparse(current_h):
                h = self.adj_matrix.dot(current_h.dot(W))
            else:
                h = self.adj_matrix.dot(np.ascontiguousarray(current_h).dot(W))

            if layer > 0:
                # Re-create the residual projection whenever either its
                # input or output dimension no longer matches (checking only
                # the output dim would break if hidden widths ever varied).
                if residual_proj is None or residual_proj.shape != (current_h.shape[1], output_dim):
                    residual_proj = self._glorot_init(current_h.shape[1], output_dim)
                h = self.alpha * h + (1 - self.alpha) * current_h.dot(residual_proj)

            if layer < self.num_layers - 1:
                h = np.maximum(h, 0)  # ReLU
                # Per-feature standardization keeps activations well-scaled.
                h = (h - np.mean(h, axis=0)) / (np.std(h, axis=0) + 1e-8)
                if self.dropout > 0:
                    # Inverted dropout: scale so the expectation is unchanged.
                    mask = (np.random.rand(*h.shape) > self.dropout).astype(np.float32)
                    h *= mask / (1.0 - self.dropout)

            current_h = h

            if layer % 2 == 0:
                gc.collect()

        # Row-wise L2 normalization so cosine similarity == dot product.
        embeddings = current_h / (np.linalg.norm(current_h, axis=1, keepdims=True) + 1e-8)
        self.timings['phase3'] = time.time() - start

        return embeddings

    def save_embeddings(self, embeddings, output_path):
        """Write embeddings in word2vec text format (header + one node/line).

        NOTE(review): each line is keyed by the contiguous integer index,
        not the original node token — consumers must map back through the
        sorted ordering of ``node_dict``; confirm downstream expectations.
        """
        start = time.time()
        out_dir = os.path.dirname(output_path)
        # os.makedirs('') raises FileNotFoundError for bare filenames.
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)

        embeddings = embeddings.astype(np.float32)

        with open(output_path, 'w') as f:
            f.write(f"{self.num_nodes} {self.emb_dim}\n")
            batch_size = 50000
            for idx in tqdm(range(0, self.num_nodes, batch_size), desc="写入批次"):
                batch = embeddings[idx:idx + batch_size]
                lines = []
                for i in range(batch.shape[0]):
                    node_id = idx + i
                    emb_str = " ".join(f"{x:.6f}" for x in batch[i])
                    lines.append(f"{node_id} {emb_str}")
                f.write("\n".join(lines) + "\n")
                f.flush()

        self.timings['phase4'] = time.time() - start

if __name__ == "__main__":
    graph_name = "TWeibo"
    CONFIG = {
        "file_path": f"../to_csv/{graph_name}/{graph_name}.ungraph",
        "output_path": f"../to_csv/{graph_name}/{graph_name}_GCN.emb",
        "emb_dim": 128,
        "hidden_dim": 256,
        "num_layers": 3,
        "dropout": 0.3,
        "alpha": 0.5
    }

    try:
        total_start = time.time()
        processor = RobustGCN(
            file_path=CONFIG['file_path'],
            emb_dim=CONFIG['emb_dim'],
            hidden_dim=CONFIG['hidden_dim'],
            num_layers=CONFIG['num_layers'],
            dropout=CONFIG['dropout'],
            alpha=CONFIG['alpha']
        )

        # Four-phase pipeline; per-phase timings land in processor.timings.
        processor._collect_nodes()
        processor._build_sparse_adjacency()
        embeddings = processor._gcn_propagate()
        processor.save_embeddings(embeddings, CONFIG['output_path'])

    except Exception as e:
        # Print the full traceback first: str(e) alone gives no location,
        # which made failures in this multi-phase pipeline undiagnosable.
        import traceback
        traceback.print_exc()
        print(f"\n执行异常: {str(e)}")
        gc.collect()