import numpy as np
import json
from redis import Redis
from redisgraph import Graph
from sklearn.preprocessing import StandardScaler, LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
import threading
from queue import Queue

import warnings
# Silence all warnings globally (hides sklearn deprecation noise).
# NOTE(review): this also hides genuinely useful warnings — consider narrowing.
warnings.filterwarnings("ignore", category=Warning)

# Configuration parameters
REDIS_HOST = 'localhost'   # Redis server host
REDIS_PORT = 6379          # Redis server port
GRAPH_NAME = 'Yelp_Ours'   # RedisGraph graph key to load from
N_SPLITS = 5               # NOTE(review): unused in this file — presumably for CV elsewhere; verify
RANDOM_STATE = 42          # seed for train/test split and the classifier
BATCH_SIZE = 1000          # rows fetched per SKIP/LIMIT query in DataLoader.parallel_load
class DataLoader:
    """Loads per-node embedding vectors and integer labels from a RedisGraph graph.

    Nodes are paged with SKIP/LIMIT and fetched by a small pool of worker
    threads, each holding its own Redis connection.
    """

    def __init__(self, graph_name):
        self.redis_conn = Redis(host=REDIS_HOST, port=REDIS_PORT)
        self.graph = Graph(graph_name, self.redis_conn)
        # Embedding dimensionality; resolved on the first parallel_load() call.
        self.dim = None

    def check_attribute_exist(self, attribute_name):
        """Return True if at least one node carries `attribute_name`."""
        result = self.graph.query(
            f"MATCH (n) WHERE n.`{attribute_name}` IS NOT NULL RETURN count(n) > 0 AS exist LIMIT 1"
        )
        return result.result_set[0][0] if result.result_set else False

    def get_attribute_dimension(self, attribute_name):
        """Return the vector length of `attribute_name`, sampled from one node.

        Raises:
            ValueError: if no node has the attribute.
            TypeError: if the stored value is neither a string nor a list.
        """
        if not self.check_attribute_exist(attribute_name):
            raise ValueError(f"属性 {attribute_name} 不存在")

        result = self.graph.query(
            f"MATCH (n) WHERE n.`{attribute_name}` IS NOT NULL RETURN n.`{attribute_name}` LIMIT 1"
        )
        sample = result.result_set[0][0]

        if isinstance(sample, str):
            # String values are JSON-encoded lists, e.g. "[0.1, 0.2, ...]".
            return len(json.loads(sample))
        elif isinstance(sample, list):
            return len(sample)
        else:
            raise TypeError(f"未知数据类型: {type(sample)}")

    def _parse_embedding(self, embedding_data):
        """Decode one raw embedding value into a float sequence of length self.dim.

        Accepts a JSON string ("[...]"), a bare comma-separated string, or a list.

        Raises:
            TypeError: for any other value type.
            ValueError: if the decoded length differs from self.dim.
        """
        if isinstance(embedding_data, str):
            embedding_str = embedding_data.strip().replace(' ', '')
            if embedding_str.startswith('[') and embedding_str.endswith(']'):
                embedding = json.loads(embedding_str)
            else:
                embedding = [float(x) for x in embedding_str.split(',')]
        elif isinstance(embedding_data, list):
            embedding = np.array(embedding_data, dtype=np.float32)
        else:
            raise TypeError(f"未知类型: {type(embedding_data)}")

        if len(embedding) != self.dim:
            raise ValueError(f"维度不一致: {len(embedding)} (预期{self.dim})")
        return embedding

    def parallel_load(self, attribute_name, num_workers=4):
        """Load all node embeddings and labels using `num_workers` threads.

        Returns:
            (X, y): float32 matrix of shape (n_loaded, dim) and an int label
            vector, trimmed to the rows that parsed successfully.

        Raises:
            ValueError: if `attribute_name` exists on no node.

        NOTE(review): SKIP/LIMIT pagination without ORDER BY gives no stable node
        order across queries; rows may be duplicated or missed if the graph is
        written to concurrently — confirm the graph is static during loading.
        """
        if not self.check_attribute_exist(attribute_name):
            raise ValueError(f"属性 {attribute_name} 不存在")

        self.dim = self.get_attribute_dimension(attribute_name)
        total_nodes = self.graph.query("MATCH (n) RETURN count(n)").result_set[0][0]

        X = np.empty((total_nodes, self.dim), dtype=np.float32)
        y = np.empty(total_nodes, dtype=int)

        q = Queue()
        for offset in range(0, total_nodes, BATCH_SIZE):
            q.put(offset)

        lock = threading.Lock()
        current_idx = 0  # next free row in X/y; guarded by `lock`
        processed = 0    # total records seen (including failed); guarded by `lock`

        def worker():
            nonlocal current_idx, processed
            # Each thread uses its own connection rather than sharing self.graph.
            local_graph = Graph(GRAPH_NAME, Redis(REDIS_HOST, REDIS_PORT))
            while True:
                offset = q.get()
                if offset is None:
                    break  # shutdown sentinel (put after q.join(), so no task_done needed)
                try:
                    query = f"""
                        MATCH (n)
                        RETURN 
                            n.`{attribute_name}` AS embedding,
                            n.`label:INT` AS label
                        SKIP {offset}
                        LIMIT {BATCH_SIZE}
                    """
                    result = local_graph.query(query)

                    with lock:
                        for record in result.result_set:
                            try:
                                X[current_idx] = self._parse_embedding(record[0])
                                y[current_idx] = int(record[1])
                                current_idx += 1
                            except Exception as e:
                                # Skip the malformed record, keep the batch going.
                                # BUG FIX: old message indexed by offset+processed,
                                # which does not identify the failing record.
                                print(f"处理记录失败 (批次偏移 {offset}): {str(e)}")
                        # BUG FIX: shared counter updated under the lock (was racy).
                        processed += len(result.result_set)
                except Exception as e:
                    print(f"批次 {offset} 查询失败: {str(e)}")
                finally:
                    # BUG FIX: always acknowledge the task, even when the query
                    # raises — otherwise q.join() below deadlocks forever.
                    q.task_done()

        threads = []
        for _ in range(num_workers):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)

        q.join()  # all batches acknowledged
        for _ in range(num_workers):
            q.put(None)  # one sentinel per worker
        for t in threads:
            t.join()

        # BUG FIX: trim to rows actually filled — np.empty leaves uninitialized
        # garbage in unfilled rows, which the downstream all-zero filter in
        # evaluate_model() would not detect.
        return X[:current_idx], y[:current_idx]

def evaluate_model(X, y):
    """Filter invalid samples, train a logistic-regression classifier, and
    return test-set metrics.

    All-zero embedding rows are dropped as invalid, then an 80/20 stratified
    split is evaluated with accuracy, macro-F1 and macro one-vs-rest ROC-AUC.

    Args:
        X: (n_samples, dim) embedding matrix.
        y: (n_samples,) integer label vector.

    Returns:
        dict with keys 'accuracy', 'f1_macro', 'roc_auc_ovr'.

    Raises:
        ValueError: if every embedding row is all-zero.
    """
    # Rows that are entirely zero are treated as failed/missing embeddings.
    valid_mask = ~np.all(X == 0, axis=1)
    X_filtered = X[valid_mask]
    y_filtered = y[valid_mask]

    if len(X_filtered) == 0:
        raise ValueError("所有embedding向量均无效！")

    print(f"过滤前样本数: {X.shape[0]}")
    print(f"过滤后样本数: {X_filtered.shape[0]}")
    print(f"无效样本比例: {(X.shape[0] - X_filtered.shape[0])/X.shape[0]:.1%}")

    X_train, X_test, y_train, y_test = train_test_split(
        X_filtered, y_filtered, test_size=0.2, stratify=y_filtered, random_state=RANDOM_STATE
    )

    # BUG FIX (compat): the deprecated `multi_class='multinomial'` argument is
    # omitted — lbfgs already fits a multinomial model for multiclass targets,
    # and the parameter was removed in scikit-learn >= 1.5.
    pipeline = make_pipeline(
        StandardScaler(),
        LogisticRegression(
            max_iter=1000,
            solver='lbfgs',
            class_weight='balanced',
            random_state=RANDOM_STATE
        )
    )

    pipeline.fit(X_train, y_train)
    y_pred = pipeline.predict(X_test)
    y_proba = pipeline.predict_proba(X_test)

    # BUG FIX: for binary targets LabelBinarizer produces a single column while
    # predict_proba produces two, so roc_auc_score raised on a shape mismatch.
    # Use the positive-class column for binary, one-vs-rest for multiclass.
    if y_proba.shape[1] == 2:
        roc_auc = roc_auc_score(y_test, y_proba[:, 1])
    else:
        roc_auc = roc_auc_score(
            y_test, y_proba, multi_class='ovr', average='macro'
        )

    metrics = {
        'accuracy': accuracy_score(y_test, y_pred),
        'f1_macro': f1_score(y_test, y_pred, average='macro'),
        'roc_auc_ovr': roc_auc
    }

    return metrics

def main():
    """Entry point: load embeddings from RedisGraph, evaluate, print metrics."""
    loader = DataLoader(GRAPH_NAME)

    try:
        print("开始加载embedding数据...")
        features, labels = loader.parallel_load('embedding:STRING')
        print(f"\n数据加载完成: {features.shape[0]} 条样本")

        results = evaluate_model(features, labels)

        print("\n评估结果:")
        for metric_name, score in results.items():
            print(f"{metric_name.upper():<10}: {score:.4f}")

    except Exception as exc:
        # Top-level boundary: report and exit quietly rather than traceback.
        print(f"处理失败: {str(exc)}")


if __name__ == "__main__":
    main()