import redis
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, average_precision_score
import time
import os

# --- Connection / tuning constants ---
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
GRAPH_NAME = 'Yelp_Ours'  # graph key queried via GRAPH.QUERY
BATCH_SIZE = 5000000  # edges scored per chunk in batched_cosine
FP16 = True  # store embeddings as float16 (halves memory vs float32)
REDIS_BATCH_SIZE = 100000  # SKIP/LIMIT page size for graph queries

# Shared connection pool; NOTE: opened at import time as a module-level side effect.
pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT)
r = redis.Redis(connection_pool=pool)

def load_data():
    """Load node embeddings and positive edges from the Redis graph.

    Returns:
        emb_matrix: (num_nodes, 128) float array of L2-normalized embeddings;
            row ``id_map[nid]`` holds the embedding of node ``nid``.
        id_map: dict mapping raw node id -> row index in ``emb_matrix``.
        pos_edges: (E, 2) int array of unique, canonically sorted
            (min_id, max_id) positive edge pairs (raw node ids).

    Raises:
        ValueError: if no node with an embedding property is found.
    """
    # --- 1. Page through every node that carries an embedding property. ---
    node_ids = []
    page = 0
    while True:
        node_query = (
            f"MATCH (n) WHERE EXISTS(n.`embedding:STRING`) "
            f"RETURN n.`id:ID` SKIP {page*REDIS_BATCH_SIZE} LIMIT {REDIS_BATCH_SIZE}"
        )
        nodes = r.execute_command("GRAPH.QUERY", GRAPH_NAME, node_query)[1]
        if not nodes:
            break
        for record in nodes:
            if record and record[0]:
                try:
                    node_ids.append(int(record[0]))
                except (TypeError, ValueError):
                    pass  # skip malformed ids rather than abort the whole load
        page += 1

    if not node_ids:
        raise ValueError("图数据中没有找到具有嵌入向量属性的节点")

    dtype = np.float16 if FP16 else np.float32
    id_map = {nid: idx for idx, nid in enumerate(node_ids)}
    # Pre-allocate so each embedding lands at the row its id maps to, no
    # matter what order the query returns records in (the original appended
    # in result order, which is not guaranteed to match node_ids).
    emb_matrix = np.zeros((len(node_ids), 128), dtype=dtype)
    valid_count = 0

    # --- 2. Fetch embeddings in id batches, keyed by the id property. ---
    page = 0
    while page * REDIS_BATCH_SIZE < len(node_ids):
        batch_nodes = node_ids[page*REDIS_BATCH_SIZE : (page+1)*REDIS_BATCH_SIZE]
        # BUG FIX: node_ids holds the `id:ID` *property*, but the original
        # filtered on the internal ID(n); also return the id alongside the
        # embedding so rows align with id_map.
        emb_query = (
            f"MATCH (n) WHERE n.`id:ID` IN {batch_nodes} "
            f"RETURN n.`id:ID`, n.`embedding:STRING`"
        )
        emb_result = r.execute_command("GRAPH.QUERY", GRAPH_NAME, emb_query)[1]

        for record in emb_result:
            try:
                nid = int(record[0])
                raw = record[1]
                emb_str = raw.decode('utf-8') if isinstance(raw, bytes) else raw
                # np.fromstring(text) is deprecated; parse explicitly.
                emb = np.array(emb_str.strip('[]').split(','), dtype=np.float32)
                if emb.shape[0] != 128:
                    emb = np.resize(emb, 128)  # pad/truncate to fixed width
                emb_matrix[id_map[nid]] = emb.astype(dtype)
                valid_count += 1
            except (KeyError, TypeError, ValueError, IndexError):
                pass  # row stays all-zero for unparseable records
        page += 1

    # L2-normalize in float32 for numerical stability, then cast back.
    norms = np.linalg.norm(emb_matrix.astype(np.float32), axis=1, keepdims=True)
    norms[norms == 0] = 1e-8  # avoid division by zero on all-zero rows
    emb_matrix = (emb_matrix.astype(np.float32) / norms).astype(dtype)

    # --- 3. Page through edges whose endpoints both have embeddings. ---
    pos_edges = []
    page = 0
    node_id_set = set(node_ids)

    while True:
        edge_query = f"""MATCH (a)-[]->(b)
                        WHERE EXISTS(a.`embedding:STRING`) AND EXISTS(b.`embedding:STRING`)
                        RETURN a.`id:ID`, b.`id:ID` 
                        SKIP {page*REDIS_BATCH_SIZE} LIMIT {REDIS_BATCH_SIZE}"""
        edges = r.execute_command("GRAPH.QUERY", GRAPH_NAME, edge_query)[1]
        if not edges:
            break

        for e in edges:
            try:
                src = int(e[0])
                dst = int(e[1])
                if src in node_id_set and dst in node_id_set:
                    pos_edges.append((src, dst))
            except (TypeError, ValueError, IndexError):
                pass  # skip malformed edge records
        page += 1

    # Canonicalize to (min, max) and dedupe; handle the no-edge case, which
    # would otherwise crash np.sort on an empty Python list.
    if pos_edges:
        pos_edges = np.unique(np.sort(np.array(pos_edges), axis=1), axis=0)
    else:
        pos_edges = np.empty((0, 2), dtype=np.int64)

    return emb_matrix, id_map, pos_edges

def generate_negatives(pos_edges, all_nodes, neg_ratio=1):
    """Vectorized negative edge sampling.

    Draws random node pairs, canonicalizes them to (min, max), and rejects
    self-loops and known positive edges until enough unique negatives exist.

    Args:
        pos_edges: (E, 2) int array of canonical positive edges (raw ids).
        all_nodes: 1-D array-like of candidate node ids.
        neg_ratio: negatives to sample per positive edge.

    Returns:
        (num_neg, 2) int array of unique canonical negative pairs.
    """
    all_nodes = np.asarray(all_nodes)
    pos_edges = np.asarray(pos_edges)
    num_neg = int(pos_edges.shape[0] * neg_ratio)
    if num_neg == 0:
        # Edge case: no positives -> nothing to sample (original looped forever
        # on the unique() bookkeeping below or returned an odd empty shape).
        return np.empty((0, 2), dtype=all_nodes.dtype)

    max_id = int(np.max(all_nodes))
    # Encode each canonical pair as a single integer for O(1)-style membership
    # tests; hoisted out of the loop (the original rebuilt list(pos_comb)
    # on every iteration).
    pos_comb = np.unique(pos_edges[:, 0] * (max_id + 1) + pos_edges[:, 1])

    candidates = np.empty((0, 2), dtype=all_nodes.dtype)
    while candidates.shape[0] < num_neg:
        batch = np.random.choice(all_nodes, size=(num_neg * 2, 2))
        batch = np.unique(np.sort(batch, axis=1), axis=0)
        batch = batch[batch[:, 0] != batch[:, 1]]  # drop self-loops
        batch_comb = batch[:, 0] * (max_id + 1) + batch[:, 1]
        batch = batch[~np.isin(batch_comb, pos_comb)]  # drop known positives
        # Dedupe inside the loop so the exit condition counts *unique* pairs
        # (the original counted duplicates and could return < num_neg rows).
        candidates = np.unique(np.concatenate([candidates, batch]), axis=0)

    return candidates[:num_neg]

def batched_cosine(emb_matrix, edges, batch_size=None):
    """Score edges by the dot product of their endpoint embeddings.

    Rows of ``emb_matrix`` are L2-normalized upstream, so the dot product is
    the cosine similarity. Work is chunked to bound peak memory.

    Args:
        emb_matrix: (N, D) embedding matrix (any float dtype; computed in fp32).
        edges: (E, 2) int array of (src_row, dst_row) indices into emb_matrix.
        batch_size: rows scored per chunk; defaults to the module BATCH_SIZE.

    Returns:
        (E,) float32 array of similarity scores.
    """
    if batch_size is None:
        batch_size = BATCH_SIZE  # preserve original hard-coded behavior

    src_idx = edges[:, 0]
    dst_idx = edges[:, 1]
    scores = np.empty(edges.shape[0], dtype=np.float32)

    for i in range(0, edges.shape[0], batch_size):
        batch = slice(i, min(i + batch_size, edges.shape[0]))
        # Upcast to float32 so fp16 storage does not degrade the reduction.
        src = emb_matrix[src_idx[batch]].astype(np.float32)
        dst = emb_matrix[dst_idx[batch]].astype(np.float32)
        scores[batch] = np.sum(src * dst, axis=1)

    return scores

def main():
    """Run the link-prediction evaluation: load data, sample negatives,
    score every edge by cosine similarity, and print AUC/AP metrics."""
    # NOTE(review): this env var is set after numpy is already imported —
    # confirm whether any consumer actually reads it at this point.
    os.environ["NPY_USE_GPU_IF_AVAILABLE"] = "0"
    np.random.seed(42)  # reproducible sampling and splits

    emb_matrix, id_map, pos_edges = load_data()
    all_nodes = np.array(list(id_map.keys()))

    def to_row_indices(pairs):
        # Translate raw node ids into embedding-matrix row indices.
        return np.array([(id_map[u], id_map[v]) for u, v in pairs])

    pos_idx = to_row_indices(pos_edges)
    neg_idx = to_row_indices(generate_negatives(pos_edges, all_nodes))

    X = np.concatenate([pos_idx, neg_idx])
    y = np.concatenate([np.ones(len(pos_idx)), np.zeros(len(neg_idx))])

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    train_scores = batched_cosine(emb_matrix, X_train)
    test_scores = batched_cosine(emb_matrix, X_test)

    print("\n评估结果:")
    print(f"Train AUC: {roc_auc_score(y_train, train_scores):.4f}")
    print(f"Test  AUC: {roc_auc_score(y_test, test_scores):.4f}")
    print(f"Test  AP : {average_precision_score(y_test, test_scores):.4f}")

if __name__ == "__main__":
    main()