import numpy as np
import json
from redis import Redis
from redisgraph import Graph
from sklearn.preprocessing import StandardScaler, LabelBinarizer
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
import threading
from queue import Queue

# Redis / RedisGraph connection settings.
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
# Name of the graph holding the dataset (Cora citation network).
GRAPH_NAME = 'Cora'
# Cross-validation folds (StratifiedKFold is imported; N_SPLITS reserved for it).
N_SPLITS = 5
# Seed for reproducible train/test splits and model initialization.
RANDOM_STATE = 42
# Number of nodes fetched per query by each loader thread.
BATCH_SIZE = 1000

class DataLoader:
    """Loads per-node embedding vectors and integer labels from a RedisGraph graph.

    Embeddings may be stored either as JSON-encoded strings, as plain
    comma-separated strings, or as native list properties.
    """

    def __init__(self, graph_name):
        self.redis_conn = Redis(host=REDIS_HOST, port=REDIS_PORT)
        self.graph = Graph(graph_name, self.redis_conn)
        # Embedding dimensionality; populated by parallel_load().
        self.dim = None

    @staticmethod
    def _parse_embedding(raw):
        """Decode one stored embedding value into a list of floats.

        Accepts a JSON array string, a comma-separated number string,
        or an already-decoded list. Raises TypeError otherwise.
        """
        if isinstance(raw, str):
            try:
                return json.loads(raw)
            except json.JSONDecodeError:
                # Fallback: plain "0.1,0.2,..." comma-separated encoding.
                return [float(x) for x in raw.split(',')]
        if isinstance(raw, list):
            return raw
        raise TypeError(f"未知embedding类型: {type(raw)}")

    def check_attribute_exist(self, attribute_name):
        """Return True if at least one node carries a non-null `attribute_name`.

        NOTE: this method was referenced by parallel_load() but missing from
        the original class, which made every parallel_load() call raise
        AttributeError.
        """
        result = self.graph.query(
            f"MATCH (n) WHERE n.`{attribute_name}` IS NOT NULL RETURN count(n)"
        )
        return bool(result.result_set) and result.result_set[0][0] > 0

    def get_attribute_dimension(self, attribute_name):
        """Return the vector length of `attribute_name`, sampled from one node.

        Raises ValueError if no node has the attribute.
        """
        result = self.graph.query(
            f"MATCH (n) WHERE n.`{attribute_name}` IS NOT NULL RETURN n.`{attribute_name}` LIMIT 1"
        )
        if not result.result_set:
            raise ValueError(f"属性 {attribute_name} 不存在")
        # Use the shared parser so string/list handling matches parallel_load().
        return len(self._parse_embedding(result.result_set[0][0]))

    def parallel_load(self, attribute_name, num_workers=4):
        """Load all node embeddings and labels with `num_workers` threads.

        Returns (X, y): X is float32 of shape (total_nodes, dim), y is int.
        Raises ValueError if the attribute is absent; re-raises the first
        error encountered by any worker thread.

        NOTE(review): batches are paged with SKIP/LIMIT and no ORDER BY —
        RedisGraph must return a stable ordering across queries for rows to
        be non-overlapping; confirm for the deployed server version.
        """
        if not self.check_attribute_exist(attribute_name):
            raise ValueError(f"属性 {attribute_name} 不存在")

        self.dim = self.get_attribute_dimension(attribute_name)

        total_nodes = self.graph.query("MATCH (n) RETURN count(n)").result_set[0][0]

        X = np.empty((total_nodes, self.dim), dtype=np.float32)
        y = np.empty(total_nodes, dtype=int)

        q = Queue()
        for offset in range(0, total_nodes, BATCH_SIZE):
            q.put(offset)

        lock = threading.Lock()
        current_idx = 0
        errors = []  # first worker exception is re-raised after join

        def worker():
            nonlocal current_idx
            # Each thread uses its own connection; redisgraph clients are not
            # safe to share across threads.
            local_graph = Graph(GRAPH_NAME, Redis(REDIS_HOST, REDIS_PORT))
            while True:
                offset = q.get()
                if offset is None:
                    break
                try:
                    query = f"""
                        MATCH (n)
                        RETURN 
                            n.`{attribute_name}` AS embedding,
                            n.`label:INT` AS label
                        SKIP {offset}
                        LIMIT {BATCH_SIZE}
                    """
                    result = local_graph.query(query)
                    # Parse outside the lock so decoding runs in parallel;
                    # only the array writes are serialized.
                    rows = [
                        (self._parse_embedding(record[0]), int(record[1]))
                        for record in result.result_set
                    ]
                    with lock:
                        for embedding, label in rows:
                            X[current_idx] = embedding
                            y[current_idx] = label
                            current_idx += 1
                except Exception as exc:
                    errors.append(exc)
                finally:
                    # Always mark the batch done, even on failure — otherwise
                    # q.join() below would block forever.
                    q.task_done()

        threads = []
        for _ in range(num_workers):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)

        q.join()
        # One sentinel per worker to shut the loop down.
        for _ in range(num_workers):
            q.put(None)
        for t in threads:
            t.join()

        if errors:
            raise errors[0]

        return X, y

def evaluate_model(X, y):
    """Train a scaled logistic-regression classifier and report test metrics.

    Args:
        X: feature matrix, shape (n_samples, n_features).
        y: integer class labels, shape (n_samples,).

    Returns:
        dict with 'accuracy', 'f1_macro' and 'roc_auc_ovr' on a stratified
        20% hold-out split.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, stratify=y, random_state=RANDOM_STATE
    )

    # NOTE: multi_class='multinomial' was dropped — it is deprecated in
    # scikit-learn >= 1.5, and lbfgs already uses the multinomial loss for
    # multiclass targets by default, so behavior is unchanged.
    pipeline = make_pipeline(
        StandardScaler(),
        LogisticRegression(
            max_iter=1000,
            solver='lbfgs',
            class_weight='balanced',
            random_state=RANDOM_STATE
        )
    )

    pipeline.fit(X_train, y_train)
    y_pred = pipeline.predict(X_test)
    y_proba = pipeline.predict_proba(X_test)

    # roc_auc_score accepts the raw label vector for multiclass input.
    # The original manually binarized y_test with LabelBinarizer, which
    # breaks for binary problems: the indicator matrix is (n, 1) while
    # y_proba is (n, 2), causing a shape mismatch.
    if y_proba.shape[1] == 2:
        roc_auc = roc_auc_score(y_test, y_proba[:, 1])
    else:
        roc_auc = roc_auc_score(
            y_test, y_proba, multi_class='ovr', average='macro'
        )

    metrics = {
        'accuracy': accuracy_score(y_test, y_pred),
        'f1_macro': f1_score(y_test, y_pred, average='macro'),
        'roc_auc_ovr': roc_auc,
    }

    return metrics

def main():
    """Script entry point: load embeddings from RedisGraph, train, and print metrics."""
    loader = DataLoader(GRAPH_NAME)
    try:
        features, labels = loader.parallel_load('embedding:STRING')
        results = evaluate_model(features, labels)
        for name, score in results.items():
            print(f"{name.upper():<10}: {score:.4f}")
    except Exception as err:
        # Top-level guard for a CLI script: report and exit normally.
        print(f"处理失败: {str(err)}")

if __name__ == "__main__":
    main()