#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CSV/XLSX数据导入Neo4j脚本

使用示例:
    python scripts/import_to_neo4j.py --file data.csv --config scripts/config_neo4j.yaml
    python scripts/import_to_neo4j.py --file data.xlsx --clear-first
"""

import os
import sys
import argparse
import logging
from pathlib import Path
from typing import Dict, List, Any, Optional
import pandas as pd
import yaml
from tqdm import tqdm
from neo4j import GraphDatabase
from neo4j.exceptions import ServiceUnavailable, AuthError

# Logging setup: INFO level with timestamped records, shared by the whole script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


class Neo4jImporter:
    """Imports car-sales rows from a DataFrame into a Neo4j graph.

    The constructor connects immediately and exits the process on failure
    (this is a CLI helper, not a library). Call close() when finished.
    """

    def __init__(self, uri: str, username: str, password: str, database: str = "neo4j"):
        """
        Initialize the importer and open the connection right away.

        Args:
            uri: Neo4j connection URI (e.g. bolt://localhost:7687)
            username: user name
            password: password
            database: target database name
        """
        self.uri = uri
        self.username = username
        self.password = password
        self.database = database
        self.driver = None
        self._connect()

    def _connect(self):
        """Open the Neo4j driver and verify connectivity; exit(1) on failure."""
        try:
            self.driver = GraphDatabase.driver(
                self.uri,
                auth=(self.username, self.password)
            )
            # Fail fast here instead of erroring on the first query.
            self.driver.verify_connectivity()
            logger.info(f"✅ 成功连接到Neo4j: {self.uri}")
        except AuthError:
            logger.error("❌ 认证失败，请检查用户名和密码")
            sys.exit(1)
        except ServiceUnavailable:
            logger.error(f"❌ 无法连接到Neo4j服务: {self.uri}")
            sys.exit(1)
        except Exception as e:
            logger.error(f"❌ 连接失败: {e}")
            sys.exit(1)

    def close(self):
        """Close the driver connection if one was opened."""
        if self.driver:
            self.driver.close()
            logger.info("连接已关闭")

    def clear_database(self):
        """Delete every node and relationship in the target database."""
        try:
            with self.driver.session(database=self.database) as session:
                session.run("MATCH (n) DETACH DELETE n")
            logger.info("✅ 数据库已清空")
        except Exception as e:
            logger.error(f"❌ 清空数据库失败: {e}")
            raise

    def create_constraints(self):
        """Create uniqueness constraints; idempotent thanks to IF NOT EXISTS."""
        constraints = [
            "CREATE CONSTRAINT IF NOT EXISTS FOR (c:CarModel) REQUIRE c.name IS UNIQUE",
            "CREATE CONSTRAINT IF NOT EXISTS FOR (b:Brand) REQUIRE b.name IS UNIQUE",
            "CREATE CONSTRAINT IF NOT EXISTS FOR (m:Manufacturer) REQUIRE m.name IS UNIQUE",
            "CREATE CONSTRAINT IF NOT EXISTS FOR (c:Country) REQUIRE c.name IS UNIQUE",
            "CREATE CONSTRAINT IF NOT EXISTS FOR (bt:BodyType) REQUIRE bt.name IS UNIQUE",
        ]

        try:
            with self.driver.session(database=self.database) as session:
                for constraint in constraints:
                    try:
                        session.run(constraint)
                    except Exception:
                        # The constraint may already exist (e.g. older server
                        # versions without IF NOT EXISTS) — safe to ignore.
                        pass
            logger.info("✅ 约束和索引创建完成")
        except Exception as e:
            logger.error(f"❌ 创建约束失败: {e}")

    @staticmethod
    def _clean_rows(rows: List[Dict[str, Any]]) -> "tuple[List[Dict[str, Any]], int]":
        """Normalize raw DataFrame records for the import query.

        - Rows missing car_model or brand are dropped (both are needed to
          build the graph).
        - NaN / 'NA' / 'N/A' values become None.
        - Price and units fields are coerced to float (0 when unparseable).
        - Every other value becomes a stripped string.

        Args:
            rows: list of record dicts (as from DataFrame.to_dict('records'))

        Returns:
            (cleaned_rows, skipped_count)
        """
        numeric_fields = {'low_price', 'high_price', 'units_sold'}
        cleaned = []
        skipped = 0
        for row in rows:
            if pd.isna(row.get('car_model')) or pd.isna(row.get('brand')):
                skipped += 1
                continue

            cleaned_row = {}
            for key, value in row.items():
                if pd.isna(value) or value in ('NA', 'N/A'):
                    cleaned_row[key] = None
                elif key in numeric_fields:
                    # pd.isna above already excluded None/NaN, so value is a
                    # real scalar here; coerce, defaulting to 0 on bad data.
                    try:
                        cleaned_row[key] = float(value)
                    except (TypeError, ValueError):
                        cleaned_row[key] = 0
                else:
                    cleaned_row[key] = str(value).strip()
            cleaned.append(cleaned_row)
        return cleaned, skipped

    def import_car_data(self, df: pd.DataFrame, batch_size: int = 100):
        """
        Import car data into Neo4j in UNWIND batches.

        Graph model (time-series values live on SalesRecord, not CarModel):
        - CarModel: name, is_ev
        - SalesRecord: year_month, units_sold, low_price, high_price
        - Brand / Manufacturer / Country / BodyType: name

        Relationships:
        - (CarModel)-[:HAS_SALES_RECORD]->(SalesRecord)
        - (CarModel)-[:BELONGS_TO]->(Brand)
        - (Brand)-[:OWNED_BY]->(Manufacturer)
        - (Brand)-[:FROM_COUNTRY]->(Country)
        - (CarModel)-[:HAS_BODY_TYPE]->(BodyType)

        Args:
            df: source DataFrame, one row per model/month observation
            batch_size: number of rows per UNWIND batch
        """
        logger.info(f"开始导入 {len(df)} 条数据...")

        cypher_query = """
        UNWIND $rows AS row
        
        // 创建或合并汽车型号节点（仅基本属性，不含时间序列数据）
        MERGE (car:CarModel {name: row.car_model})
        SET car.is_ev = row.is_ev
        
        // 创建销售记录节点（时间序列数据）
        WITH car, row
        CREATE (sales:SalesRecord {
            year_month: row.year_month,
            units_sold: row.units_sold,
            low_price: row.low_price,
            high_price: row.high_price
        })
        MERGE (car)-[:HAS_SALES_RECORD]->(sales)
        
        // 创建或合并品牌节点
        WITH car, row
        MERGE (brand:Brand {name: row.brand})
        
        // 创建或合并制造商节点
        WITH car, brand, row
        MERGE (manufacturer:Manufacturer {name: row.manufacturer})
        
        // 创建或合并国家节点（如果有值）
        WITH car, brand, manufacturer, row
        CALL {
            WITH brand, row
            WITH brand, row WHERE row.brand_country IS NOT NULL
            MERGE (country:Country {name: row.brand_country})
            MERGE (brand)-[:FROM_COUNTRY]->(country)
        }
        
        // 创建或合并车身类型节点（如果有值）
        WITH car, brand, manufacturer, row
        CALL {
            WITH car, row
            WITH car, row WHERE row.body_type IS NOT NULL
            MERGE (bodyType:BodyType {name: row.body_type})
            MERGE (car)-[:HAS_BODY_TYPE]->(bodyType)
        }
        
        // 创建关系
        WITH car, brand, manufacturer
        MERGE (car)-[:BELONGS_TO]->(brand)
        MERGE (brand)-[:OWNED_BY]->(manufacturer)
        """

        total_imported = 0
        skipped_count = 0

        try:
            with self.driver.session(database=self.database) as session:
                for i in tqdm(range(0, len(df), batch_size), desc="导入进度"):
                    batch_df = df.iloc[i:i + batch_size]
                    cleaned_rows, skipped = self._clean_rows(batch_df.to_dict('records'))
                    skipped_count += skipped

                    if cleaned_rows:
                        # consume() blocks until the server has fully
                        # processed the batch before we move on.
                        session.run(cypher_query, rows=cleaned_rows).consume()
                        total_imported += len(cleaned_rows)

            logger.info(f"✅ 成功导入 {total_imported} 条数据")
            if skipped_count > 0:
                logger.warning(f"⚠️  跳过 {skipped_count} 条无效数据（缺少必需字段）")

        except Exception as e:
            logger.error(f"❌ 导入数据失败: {e}")
            raise

    def get_statistics(self) -> Dict[str, Any]:
        """Return node counts per label and relationship counts per type.

        Each label is counted with its own query: the previous chained
        OPTIONAL MATCH version built a cartesian product across all six
        labels and returned no row at all on an empty database.

        Returns:
            {"nodes": {label: count}, "relationships": {type: count}},
            or {} if the queries fail.
        """
        node_labels = ["CarModel", "SalesRecord", "Brand", "Manufacturer",
                       "Country", "BodyType"]
        try:
            with self.driver.session(database=self.database) as session:
                nodes = {}
                for label in node_labels:
                    record = session.run(
                        f"MATCH (n:`{label}`) RETURN count(n) AS cnt"
                    ).single()
                    nodes[label] = record["cnt"]

                rel_stats = session.run("""
                    MATCH ()-[r]->()
                    RETURN type(r) as rel_type, count(r) as count
                """).data()

                return {
                    "nodes": nodes,
                    "relationships": {item["rel_type"]: item["count"] for item in rel_stats},
                }
        except Exception as e:
            logger.error(f"获取统计信息失败: {e}")
            return {}


def load_config(config_file: str) -> Dict[str, Any]:
    """Parse a YAML configuration file, exiting the process on any failure."""
    try:
        with open(config_file, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)
    except Exception as e:
        logger.error(f"❌ 加载配置文件失败: {e}")
        sys.exit(1)


def read_data_file(file_path: str) -> pd.DataFrame:
    """Load a CSV or Excel file into a DataFrame.

    Exits the process on an unsupported extension or a read error, after
    logging the reason. On success, logs the shape and column names.
    """
    try:
        ext = Path(file_path).suffix.lower()

        if ext == '.csv':
            frame = pd.read_csv(file_path)
            logger.info(f"✅ 成功读取CSV文件: {file_path}")
        elif ext in ('.xlsx', '.xls'):
            frame = pd.read_excel(file_path)
            logger.info(f"✅ 成功读取Excel文件: {file_path}")
        else:
            logger.error(f"❌ 不支持的文件格式: {ext}")
            sys.exit(1)

        logger.info(f"数据行数: {len(frame)}, 列数: {len(frame.columns)}")
        logger.info(f"列名: {list(frame.columns)}")
        return frame
    except Exception as e:
        logger.error(f"❌ 读取文件失败: {e}")
        sys.exit(1)


def validate_dataframe(df: pd.DataFrame, required_columns: List[str]) -> bool:
    """Return True iff *df* contains every column named in *required_columns*.

    Logs the missing columns and the actual columns when validation fails.
    """
    present = set(df.columns)
    missing = [name for name in required_columns if name not in present]
    if not missing:
        return True
    logger.error(f"❌ 缺少必需的列: {missing}")
    logger.info(f"当前列: {list(df.columns)}")
    return False


def main():
    """CLI entry point: parse arguments, read the data file, run the import."""
    parser = argparse.ArgumentParser(description='导入CSV/XLSX数据到Neo4j')
    parser.add_argument('--file', '-f', required=True, help='数据文件路径 (CSV或XLSX)')
    parser.add_argument('--config', '-c', help='配置文件路径 (YAML)', default='scripts/config_neo4j.yaml')
    parser.add_argument('--uri', help='Neo4j URI (例如: bolt://localhost:7687)')
    parser.add_argument('--username', '-u', help='Neo4j用户名', default='neo4j')
    parser.add_argument('--password', '-p', help='Neo4j密码')
    parser.add_argument('--database', '-d', help='数据库名称', default='neo4j')
    # default=None lets us distinguish "flag omitted" from "flag given",
    # so an explicit CLI value can override the config file (previously an
    # explicit --batch-size was silently ignored when a config was used).
    parser.add_argument('--batch-size', '-b', type=int, help='批量导入大小', default=None)
    parser.add_argument('--clear-first', action='store_true', help='导入前清空数据库')

    args = parser.parse_args()

    # Resolve connection settings: explicit CLI credentials win, otherwise
    # fall back to the YAML config file.
    if args.uri and args.password:
        neo4j_config = {
            'uri': args.uri,
            'username': args.username,
            'password': args.password,
            'database': args.database
        }
        config_batch_size = None
    elif os.path.exists(args.config):
        config = load_config(args.config)
        neo4j_config = config.get('neo4j', {})
        config_batch_size = config.get('import_config', {}).get('batch_size')
    else:
        logger.error("❌ 请提供Neo4j连接信息 (通过命令行参数或配置文件)")
        sys.exit(1)

    # Batch-size precedence: CLI flag > config file > default 100.
    if args.batch_size is not None:
        batch_size = args.batch_size
    elif config_batch_size is not None:
        batch_size = config_batch_size
    else:
        batch_size = 100

    # Load the source data (exits on read failure).
    df = read_data_file(args.file)

    # Columns the import query depends on.
    required_columns = [
        'car_model', 'units_sold', 'manufacturer', 'low_price',
        'high_price', 'year_month', 'is_ev', 'body_type',
        'brand', 'brand_country'
    ]

    if not validate_dataframe(df, required_columns):
        sys.exit(1)

    importer = Neo4jImporter(
        uri=neo4j_config['uri'],
        username=neo4j_config['username'],
        password=neo4j_config['password'],
        database=neo4j_config.get('database', 'neo4j')
    )

    try:
        # Optionally wipe the database, but only after explicit confirmation.
        if args.clear_first:
            confirm = input("⚠️  确定要清空数据库吗? (yes/no): ")
            # strip() tolerates accidental surrounding whitespace in "yes".
            if confirm.strip().lower() == 'yes':
                importer.clear_database()
            else:
                logger.info("取消清空数据库操作")

        importer.create_constraints()
        importer.import_car_data(df, batch_size=batch_size)

        stats = importer.get_statistics()
        logger.info("=" * 50)
        logger.info("导入统计:")
        logger.info(f"  节点统计: {stats.get('nodes', {})}")
        logger.info(f"  关系统计: {stats.get('relationships', {})}")
        logger.info("=" * 50)

    finally:
        # Always release the driver, even when the import raised.
        importer.close()


if __name__ == '__main__':
    main()

