# -*- coding: utf-8 -*-
import pandas as pd
import os
import re
import chardet
from neo4j import GraphDatabase
import warnings
warnings.filterwarnings('ignore', category=UserWarning, module='openpyxl')

class Neo4jInitializer:
    """Build Neo4j Cypher scripts from an Excel schema workbook and CSV data.

    Reads node/relationship design sheets from ``excel_path`` and the matching
    CSV data under ``csv_dir``, then writes ``!nodes.cypher``,
    ``!indexes.cypher`` (indexes + constraints), ``!relationships.cypher`` and
    ``!clean.cypher`` into ``output_dir``.
    """

    def __init__(self, excel_path: str, csv_dir: str, output_dir: str):
        self.excel_path = excel_path
        # Normalize separators so paths embedded in messages are portable.
        self.csv_dir = os.path.abspath(csv_dir).replace("\\", "/")
        self.output_dir = output_dir
        # NOTE(review): hard-coded bolt URI and credentials — consider moving
        # these to configuration/environment variables.
        self.driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "123456789"))
        os.makedirs(self.output_dir, exist_ok=True)
        self.relationship_config = []  # one dict per parsed relationship row
        self.index_names = []          # index names, consumed by _generate_clean_cypher
        self.constraint_names = []     # constraint names, consumed by _generate_clean_cypher

    def _extract_physical_name(self, sheet_name: str) -> str:
        """Return the node's physical name stored in column D, row 2 of *sheet_name*.

        Raises:
            ValueError: if the cell is empty or missing.
        """
        df = pd.read_excel(
            self.excel_path,
            sheet_name=sheet_name,
            header=None,   # no header row: address cells positionally
            usecols="D",   # only column D is relevant
            engine="openpyxl"
        )

        # Give the single column a stable name.
        df.columns = ['PhysicalName']

        # Column D, second row (index 1) holds the physical name.
        raw_value = df.iloc[1, 0]

        if pd.isna(raw_value) or str(raw_value).strip() == "":
            raise ValueError(f"Sheet [{sheet_name}] 的D列第二行未配置物理名称")

        return str(raw_value).strip()

    def _parse_relationship_csv(self):
        """Parse rels_*.csv files under <csv_dir>/relationships (strict comma-separated format).

        NOTE(review): not invoked by generate_cypher() in this file; kept for
        the simple startId/endId CSV layout.
        """
        rel_dir = os.path.join(self.csv_dir, "relationships")
        for filename in os.listdir(rel_dir):
            if filename.startswith("rels_") and filename.endswith(".csv"):
                # Derive the relationship type from the file name
                # (e.g. rels_contains.csv -> CONTAINS). Underscores become
                # spaces, mirroring how _parse_relationship_sheet builds the
                # file name from the type.
                rel_type = filename[5:-4].upper().replace('_', ' ')

                # Strictly validate the required columns.
                df = pd.read_csv(os.path.join(rel_dir, filename))
                if not {'startId', 'endId'}.issubset(df.columns):
                    # FIX: interpolate the actual file name (was the literal
                    # text "(unknown)").
                    raise ValueError(f"CSV文件 {filename} 必须包含 startId 和 endId 列")

                # Emit one relationship config per data row.
                for _, row in df.iterrows():
                    self.relationship_config.append({
                        "rel_type": rel_type,
                        "start_id": str(row['startId']).strip(),
                        "end_id": str(row['endId']).strip()
                    })

    def _parse_relationship_sheet(self, sheet_name: str):
        """Parse a relationship-design sheet and join it with its CSV data."""
        csv_filename = None
        try:
            # Data starts at row 4; columns B:H carry the design fields.
            df = pd.read_excel(
                self.excel_path,
                sheet_name=sheet_name,
                header=3,
                usecols="B:H",
                engine="openpyxl"
            )

            # Rename columns to the expected header names.
            df.columns = [
                'No', '关系类型', '起点类型', '终点类型',
                '是否允许多对多', '约束条件字段', '属性'
            ]

            for _, row in df.iterrows():
                # Build the CSV file name from the relationship type
                # (hidden characters stripped first).
                rel_type = self.clean_string(row['关系类型']).lower().replace(' ', '_')
                csv_filename = f"rels_{rel_type}.csv"

                # The constraint field uses '→' to separate start/end columns.
                constraint = self.clean_string(row['约束条件字段'])
                print(f"[DEBUG] 清洗后约束字段: {constraint}")

                # Enforce the 'X→Y' separator format.
                if '→' not in constraint:
                    raise ValueError(f"约束字段格式错误，应为 'X→Y'，实际为: {constraint}")
                start_field, end_field = constraint.split('→')
                start_field = self.clean_string(start_field)
                end_field = self.clean_string(end_field)

                csv_path = os.path.join(self.csv_dir, "relationships", csv_filename)

                if os.path.exists(csv_path):
                    df_rel = pd.read_csv(csv_path)
                    # Strip hidden characters out of the CSV column names too.
                    df_rel.columns = [self.clean_string(col) for col in df_rel.columns]

                    for _, rel_row in df_rel.iterrows():
                        start_id = self.clean_string(rel_row.get(start_field, ''))
                        end_id = self.clean_string(rel_row.get(end_field, ''))

                        # Collect relationship properties, skipping invalid entries.
                        properties = {}
                        if pd.notna(row['属性']):
                            for prop in self.clean_string(row['属性']).split(','):
                                prop = prop.strip()
                                # Skip blanks, '-' and 'nan' placeholders.
                                if not prop or prop in ['-', 'nan']:
                                    continue
                                if prop not in rel_row:
                                    print(f"⚠️ 警告: CSV文件 {csv_filename} 缺失属性列 {prop}，已跳过")
                                    continue

                                value = self.clean_string(rel_row[prop])
                                # Normalize empty/null-ish values to the string 'nan'.
                                if value in ['', 'null', 'nan', None]:
                                    value = 'nan'
                                properties[prop] = value

                        self.relationship_config.append({
                            "rel_type": self.clean_string(row['关系类型']).upper(),
                            "start_type": self.clean_string(row['起点类型']),
                            "end_type": self.clean_string(row['终点类型']),
                            "start_field": start_field,
                            "end_field": end_field,
                            "start_id": start_id,
                            "end_id": end_id,
                            "properties": properties
                        })

        except Exception as e:
            # Summarize context before re-raising so the caller sees a stack.
            error_msg = f"""
            解析失败! 详细信息:
            - 错误位置: {sheet_name} 表
            - 关系类型: {csv_filename or '未知'}
            - 约束字段: {constraint if 'constraint' in locals() else '未解析'}
            - 错误原因: {str(e)}
            """
            print(error_msg)
            raise

    def _parse_node_sheet(self, sheet_name: str) -> dict:
        """Parse a node-design sheet and emit CREATE statements from its CSV data.

        Returns:
            dict: {"cypher": [CREATE statement per CSV row]}.
        """
        try:
            physical_name = self._extract_physical_name(sheet_name)
            # CSV name is the lower-cased physical name, e.g. nodes_workbench.csv.
            csv_filename = f"nodes_{physical_name.lower()}.csv"
            csv_path = os.path.join(self.csv_dir, "nodes", csv_filename)
            print(f"[CHECK] CSV路径: {csv_path}")  # debug output

            # Read the CSV data, blanking out NaN cells.
            df_data = pd.read_csv(csv_path, encoding='utf-8-sig').fillna('')
            print(f"[DEBUG] CSV数据样例:\n{df_data.head(2)}")  # data preview

            # One CREATE per row; empty cells are omitted from the property map.
            nodes_cypher = []
            for _, row in df_data.iterrows():
                props = [f"{col}: '{str(val).strip()}'" for col, val in row.items() if str(val).strip()]
                nodes_cypher.append(f"CREATE (:`{physical_name}` {{{', '.join(props)}}});")

            return {"cypher": nodes_cypher}

        except Exception as e:
            print(f"节点Sheet解析失败: {str(e)}")
            raise

    def _generate_indexes(self):
        """Generate de-duplicated CREATE INDEX statements for every relationship endpoint.

        Also records each index name in ``self.index_names`` so the cleanup
        script can drop it (FIX: previously the names were never recorded, so
        !clean.cypher contained no DROP INDEX statements).
        """
        indexes = []
        seen = set()
        for config in self.relationship_config:
            # Strip zero-width spaces that survive Excel round-trips.
            start_type = config['start_type'].replace('\u200b', '')
            start_field = config['start_field'].replace('\u200b', '')
            end_type = config['end_type'].replace('\u200b', '')
            end_field = config['end_field'].replace('\u200b', '')

            # Deterministic index names: <label>_<field>_index.
            start_index_name = f"{start_type.lower()}_{start_field}_index"
            end_index_name = f"{end_type.lower()}_{end_field}_index"

            if start_index_name not in seen:
                indexes.append(
                    f"CREATE INDEX {start_index_name} IF NOT EXISTS "
                    f"FOR (n:{start_type}) ON (n.{start_field});"
                )
                seen.add(start_index_name)
                self.index_names.append(start_index_name)  # FIX: record for cleanup

            if end_index_name not in seen:
                indexes.append(
                    f"CREATE INDEX {end_index_name} IF NOT EXISTS "
                    f"FOR (n:{end_type}) ON (n.{end_field});"
                )
                seen.add(end_index_name)
                self.index_names.append(end_index_name)  # FIX: record for cleanup
        return indexes

    def _generate_constraints(self):
        """Generate uniqueness constraints (explicitly named)."""
        constraints = []
        # Static constraint configuration from the design document.
        constraint_configs = [
            {"label": "Line", "field": "lineId"},
            {"label": "Equipment", "field": "code"}
        ]
        for config in constraint_configs:
            constraint_name = f"constraint_{config['label'].lower()}_{config['field']}_unique"
            constraints.append(
                f"CREATE CONSTRAINT {constraint_name} IF NOT EXISTS "
                f"FOR (n:{config['label']}) REQUIRE n.{config['field']} IS UNIQUE;"
            )
            self.constraint_names.append(constraint_name)  # recorded for cleanup
        return constraints

    def _generate_relationships(self):
        """Generate MERGE statements with fully dynamic properties (no hard-coding)."""
        relationships = []
        for config in self.relationship_config:
            prop_pairs = []
            for prop, value in config['properties'].items():
                # Empty/invalid values become the literal string 'nan'.
                if pd.isna(value) or value in ['null', '', 'nan', None]:
                    prop_str = "'nan'"

                else:
                    try:
                        # Emit numerics unquoted: ints as ints, else floats.
                        numeric_value = float(value)
                        if numeric_value.is_integer():
                            prop_str = str(int(numeric_value))
                        else:
                            prop_str = str(numeric_value)
                    except (ValueError, TypeError):
                        # Non-numeric values are quoted strings.
                        prop_str = f"'{value}'"

                prop_pairs.append(f"{prop}: {prop_str}")

            props_str = ", ".join(prop_pairs)

            # Match both endpoints by their configured key fields, then MERGE.
            cypher = f"""
    MATCH (start:`{config['start_type']}` {{{config['start_field']}: '{config['start_id']}'}})
    MATCH (end:`{config['end_type']}` {{{config['end_field']}: '{config['end_id']}'}})
    MERGE (start)-[:`{config['rel_type']}` {{{props_str}}}]->(end);
            """
            relationships.append(cypher.strip())

        return relationships

    def _generate_clean_cypher(self):
        """Generate the cleanup script: wipe data, then drop recorded indexes/constraints."""
        clean_cypher = [
            "MATCH (n) DETACH DELETE n;"
        ]
        # Drop every index recorded during generation.
        for name in self.index_names:
            clean_cypher.append(f"DROP INDEX {name} IF EXISTS;")
        # Drop every constraint recorded during generation.
        for name in self.constraint_names:
            clean_cypher.append(f"DROP CONSTRAINT {name} IF EXISTS;")
        return clean_cypher

    def clean_string(self, s):
        """Strip hidden/zero-width characters while preserving the '→' separator."""
        if not isinstance(s, str):
            s = str(s)
        # Remove invisible characters (zero-width spaces, BOM, soft hyphen,
        # bidi controls, word joiners, line/paragraph separators).
        return re.sub(
            r'[\u200b-\u200f\uFEFF\u00AD\u202C-\u202E\u2060-\u206F\u180E\u200A-\u200D\u034F\u2028\u2029]+',
            '',
            s.strip()
        )

    def generate_cypher(self):
        """Generate the complete set of Cypher files."""
        xls = pd.ExcelFile(self.excel_path)

        nodes_cypher = []

        # Node sheets: any sheet whose name contains "节点".
        for sheet_name in xls.sheet_names:
            if "节点" in sheet_name:
                try:
                    node_config = self._parse_node_sheet(sheet_name)
                    nodes_cypher.extend(node_config["cypher"])
                except Exception as e:
                    print(f"处理节点Sheet失败: {sheet_name}\n错误详情: {str(e)}")
                    raise

        # Relationship sheets: any sheet whose name contains "关系设计".
        for sheet_name in xls.sheet_names:
            if "关系设计" in sheet_name:
                self._parse_relationship_sheet(sheet_name)

        # Generate schema statements after all relationships are parsed.
        indexes_cypher = self._generate_indexes()
        relationships_cypher = self._generate_relationships()
        constraints_cypher = self._generate_constraints()

        # Indexes and constraints ship together in the schema file.
        schema_cypher = indexes_cypher + constraints_cypher

        # Cleanup script last, so all index/constraint names are recorded.
        clean_cypher = self._generate_clean_cypher()

        # Write the cleanup file.
        with open(os.path.join(self.output_dir, "!clean.cypher"), "w", encoding='utf-8') as f:
            f.write("\n".join(clean_cypher))

        # Write the node file (FIX: previously written twice back-to-back).
        with open(os.path.join(self.output_dir, "!nodes.cypher"), "w", encoding='utf-8') as f:
            f.write("\n".join(nodes_cypher))

        # FIX: write indexes AND constraints; constraints_cypher was previously
        # generated but never written to any file, while !clean.cypher still
        # tried to drop the never-created constraints.
        with open(os.path.join(self.output_dir, "!indexes.cypher"), "w", encoding='utf-8') as f:
            f.write("\n".join(schema_cypher))

        with open(os.path.join(self.output_dir, "!relationships.cypher"), "w", encoding='utf-8') as f:
            f.write("\n".join(relationships_cypher))

        print(f"生成文件：\n- {self.output_dir}/!nodes.cypher\n- {self.output_dir}/!indexes.cypher\n- {self.output_dir}/!relationships.cypher")

        print(f"[DEBUG] 节点数据样例: {nodes_cypher[:1] if nodes_cypher else '空'}")
        print(f"[DEBUG] 生成的节点语句数量: {len(nodes_cypher)}")
        print(f"[DEBUG] 生成的索引语句数量: {len(indexes_cypher)}")
        print(f"[DEBUG] 生成的关系语句数量: {len(relationships_cypher)}")

if __name__ == "__main__":
    initializer = Neo4jInitializer(
        excel_path="config/schema.xlsx",
        csv_dir="data",
        output_dir="data/cypher"
    )
    initializer.generate_cypher()