#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cloud-drive resource importer.

Supports multi-threaded processing, batched database operations and a
smart overwrite strategy.
"""

import os
import sys
import json
import logging
import threading
from datetime import datetime
from typing import Dict, List, Optional, Tuple, Set
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
from collections import defaultdict

# Make the project root importable so the `src.*` imports below resolve
# when this file is run directly as a script.
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

from sqlalchemy import Column, Integer, String, DateTime, Text, create_engine, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager

# Project-local modules
from src.utils.config import get_database_url, get_extension_config, get_log_config
from src.common.database import get_session, init_global_session
from src.models import VirtualFile, VirtualStorage
from src.vfs.virtual_file_system import VirtualFileSystem

# Logging setup.
# NOTE(review): FileHandler assumes a `logs/` directory already exists in
# the current working directory — confirm, otherwise this raises at import.
log_config = get_log_config()
logging.basicConfig(
    level=getattr(logging, log_config['level']),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/cloud_importer.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger('CloudImporter')

# Declarative base for the cloud_resources_copy1 table model below
Base = declarative_base()

class CloudResourcesCopy1(Base):
    """ORM model for the `cloud_resources_copy1` cloud-share resource table.

    Each row describes one shared link on a cloud drive, plus a JSON blob
    (`file_info_json`) listing the files behind that share.
    """
    __tablename__ = 'cloud_resources_copy1'
    
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Cloud drive type: Tianyi, Baidu, Aliyun, 115, etc.
    clouds_type = Column(String(50), nullable=False, comment='云盘类型：天翼云，百度，阿里云，115等')
    # Share code; also drives the generated virtual directory layout.
    share_code = Column(String(100), nullable=False, comment='分享码')
    # Optional access/extraction code for the share.
    access_code = Column(String(100), default='', comment='访问码')
    # Human-readable share name.
    share_name = Column(String(500), default='', comment='分享名称')
    # Full share URL.
    full_url = Column(String(1000), default='', comment='分享链接')
    # Share status, e.g. 'active'.
    share_status = Column(String(50), default='active', comment='分享状态')
    # When the share was created on the cloud drive.
    share_time = Column(DateTime, comment='分享时间')
    # JSON string describing the files in the share.
    file_info_json = Column(Text, comment='文件信息json字符串')
    created_at = Column(DateTime, default=datetime.now, comment='创建时间')
    updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now, comment='更新时间')

class CloudImporter:
    """Import cloud-share resources into the local virtual file system.

    Rows are read from the MySQL ``cloud_resources_copy1`` table, each row's
    file-info JSON is parsed, and matching directories and files are created
    in the WebDAV virtual file system. Batches of rows are processed on a
    thread pool; shared statistics are guarded by a lock.
    """

    # File extension -> media type code (1=video, 2=audio, 3=image,
    # 4=document, 5=archive, 0=unknown). Class-level so the table is built
    # once instead of on every _get_media_type() call.
    _MEDIA_TYPES = {
        # video
        'mp4': 1, 'avi': 1, 'mkv': 1, 'mov': 1, 'wmv': 1, 'flv': 1, 'webm': 1, 'm4v': 1,
        'mpg': 1, 'mpeg': 1, '3gp': 1, 'ts': 1, 'rmvb': 1, 'rm': 1,
        # audio
        'mp3': 2, 'wav': 2, 'flac': 2, 'aac': 2, 'ogg': 2, 'wma': 2, 'm4a': 2, 'ape': 2,
        # image
        'jpg': 3, 'jpeg': 3, 'png': 3, 'gif': 3, 'bmp': 3, 'webp': 3, 'svg': 3, 'ico': 3,
        'tiff': 3, 'tif': 3, 'raw': 3,
        # document
        'pdf': 4, 'doc': 4, 'docx': 4, 'xls': 4, 'xlsx': 4, 'ppt': 4, 'pptx': 4, 'txt': 4,
        'rtf': 4, 'odt': 4, 'ods': 4, 'odp': 4,
        # archive
        'zip': 5, 'rar': 5, '7z': 5, 'tar': 5, 'gz': 5, 'bz2': 5, 'xz': 5, 'iso': 5
    }

    def __init__(self, max_workers: int = None, batch_size: int = None):
        """Initialize the importer.

        Args:
            max_workers: Maximum worker threads; falls back to the 'import'
                extension config (default 4).
            batch_size: Resources per worker batch; config default 100.
        """
        # Pull defaults from configuration when not supplied.
        import_config = get_extension_config('import')
        self.max_workers = max_workers or import_config.get('max_workers', 4)
        self.batch_size = batch_size or import_config.get('batch_size', 100)

        # Virtual file system handle, created lazily by _get_vfs() on first
        # use. (Bug fix: the original left this as None and never initialized
        # it, so every vfs call raised AttributeError.)
        self.vfs = None

        # Source database (MySQL) engine and session factory.
        mysql_url = get_database_url()
        self.mysql_engine = create_engine(mysql_url)
        self.MySQLSessionLocal = sessionmaker(bind=self.mysql_engine)

        # Target database (local WebDAV) engine and session factory.
        from src.common.database import get_engine
        self.engine = get_engine()
        self.SessionLocal = sessionmaker(bind=self.engine)

        # Aggregate import counters; always update under stats_lock.
        self.import_stats = {
            'total_processed': 0,
            'new_imports': 0,
            'updated_imports': 0,
            'skipped': 0,
            'errors': 0
        }

        # Per-share_code detail counters (reserved; not populated here).
        self.share_code_stats = {}

        # Locks for the shared stats dict and the directory cache.
        self.stats_lock = threading.Lock()
        self.directory_cache_lock = threading.Lock()

        # Directories already known to exist, to avoid repeated vfs calls.
        self.created_directories: Set[str] = set()

        # Queue reserved for batched operations (currently unused).
        self.batch_operations = Queue()

    def _get_vfs(self) -> "VirtualFileSystem":
        """Return the shared VirtualFileSystem, creating it on first use.

        Assumes VirtualFileSystem() is constructible with no arguments —
        TODO(review): confirm against src.vfs.virtual_file_system.
        """
        if self.vfs is None:
            self.vfs = VirtualFileSystem()
        return self.vfs

    @contextmanager
    def get_session(self):
        """Yield a local WebDAV DB session; commit on success, rollback on error."""
        session = self.SessionLocal()
        try:
            yield session
            session.commit()
        except Exception as e:
            session.rollback()
            logger.error(f"本地数据库操作失败: {e}")
            raise
        finally:
            session.close()

    @contextmanager
    def get_mysql_session(self):
        """Yield a MySQL session; commit on success, rollback on error."""
        session = self.MySQLSessionLocal()
        try:
            yield session
            session.commit()
        except Exception as e:
            session.rollback()
            logger.error(f"MySQL数据库操作失败: {e}")
            raise
        finally:
            session.close()

    def _extract_file_list(self, data) -> List[Dict]:
        """Map a decoded JSON value onto a list of file-info dicts.

        Handles a plain list, a dict wrapping the list under 'fileInfo' /
        'fileList' / 'files', a bare single-file dict, and a double-encoded
        JSON string (decoded once more and re-dispatched). Anything else
        yields [].
        """
        if isinstance(data, list):
            return data
        if isinstance(data, dict):
            for key in ('fileInfo', 'fileList', 'files'):
                if key in data:
                    return data[key]
            # The dict itself looks like one file record; wrap it.
            return [data]
        if isinstance(data, str):
            # Double-encoded JSON: decode once more and re-dispatch.
            try:
                inner = json.loads(data)
            except json.JSONDecodeError:
                logger.warning(f"字符串类型的JSON无法进一步解析: {data[:100]}...")
                return []
            return self._extract_file_list(inner)
        logger.warning(f"未知的JSON结构类型: {type(data)}")
        return []

    def parse_file_info_json(self, file_info_json: str) -> List[Dict]:
        """Parse the file_info_json column into a list of file dicts.

        Returns [] for empty input or on any parse failure.
        """
        try:
            if not file_info_json or file_info_json.strip() == '':
                return []
            return self._extract_file_list(json.loads(file_info_json))
        except json.JSONDecodeError as e:
            logger.error(f"JSON解析失败: {e}")
            return []
        except Exception as e:
            logger.error(f"解析文件信息时发生错误: {e}")
            return []

    def normalize_path(self, path: str) -> str:
        """Normalize a virtual path: leading '/', no '//' runs, no trailing '/'."""
        if not path:
            return '/'

        # Ensure a leading slash.
        if not path.startswith('/'):
            path = '/' + path

        # Collapse repeated slashes.
        while '//' in path:
            path = path.replace('//', '/')

        # Drop a trailing slash except for the root itself.
        if path != '/' and path.endswith('/'):
            path = path[:-1]

        return path

    def generate_virtual_path(self, resource: "CloudResourcesCopy1", file_info: Dict) -> Optional[str]:
        """Build the virtual path for one file of a shared resource.

        Layout: /<clouds_type>/<char 1>/<chars 2-3>/<share_code>/folder/<name>,
        with the type and prefix characters lowercased. Returns None when the
        file name is missing or share_code is shorter than 3 characters.
        """
        try:
            file_name = file_info.get('fileName', file_info.get('name', ''))
            if not file_name:
                logger.warning(f"文件信息缺少文件名: {file_info}")
                return None

            share_code = resource.share_code
            if not share_code or len(share_code) < 3:
                logger.warning(f"无效的share_code: {share_code}")
                return None

            # Sharding scheme: type / first char / chars 2-3 / full code.
            clouds_type = resource.clouds_type.lower()
            level1 = share_code[0].lower()
            level2 = share_code[1:3].lower()
            level3 = share_code  # full share_code kept verbatim

            virtual_path = f"/{clouds_type}/{level1}/{level2}/{level3}/folder/{file_name}"
            return self.normalize_path(virtual_path)

        except Exception as e:
            logger.error(f"生成虚拟路径时发生错误: {e}")
            return None

    def import_resources(self, limit: int = None, offset: int = 0) -> Dict[str, int]:
        """Import cloud resources from MySQL into the virtual file system.

        Args:
            limit: Maximum number of rows to import (None = all).
            offset: Row offset to start from.

        Returns:
            The aggregate import_stats dict.

        Raises:
            Any exception from the MySQL query itself is logged and re-raised.
        """
        logger.info(f"开始导入云盘资源，限制: {limit}, 偏移: {offset}")

        try:
            with self.get_mysql_session() as mysql_session:
                query = mysql_session.query(CloudResourcesCopy1)
                if limit:
                    query = query.limit(limit)
                if offset:
                    query = query.offset(offset)

                resources = query.all()
                logger.info(f"找到 {len(resources)} 个云盘资源")

                # NOTE(review): ORM instances are shared across worker
                # threads while the source session is still open; assumes
                # attribute access does not trigger lazy loads — verify.
                with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                    futures = [
                        executor.submit(self._process_resource_batch,
                                        resources[i:i + self.batch_size])
                        for i in range(0, len(resources), self.batch_size)
                    ]

                    # Surface per-batch failures without aborting the run.
                    for future in as_completed(futures):
                        try:
                            future.result()
                        except Exception as e:
                            logger.error(f"批处理失败: {e}")
                            with self.stats_lock:
                                self.import_stats['errors'] += 1

                logger.info(f"导入完成，统计信息: {self.import_stats}")
                return self.import_stats

        except Exception as e:
            logger.error(f"导入过程中发生错误: {e}")
            raise

    def _process_resource_batch(self, resources: List["CloudResourcesCopy1"]):
        """Process one batch of resources, counting failures individually."""
        for resource in resources:
            try:
                self._process_single_resource(resource)
            except Exception as e:
                logger.error(f"处理资源 {resource.share_code} 失败: {e}")
                with self.stats_lock:
                    self.import_stats['errors'] += 1

    def _process_single_resource(self, resource: "CloudResourcesCopy1"):
        """Import every file of a single shared resource."""
        try:
            file_infos = self.parse_file_info_json(resource.file_info_json)
            if not file_infos:
                logger.debug(f"资源 {resource.share_code} 没有文件信息")
                with self.stats_lock:
                    self.import_stats['skipped'] += 1
                return

            for file_info in file_infos:
                virtual_path = self.generate_virtual_path(resource, file_info)
                if not virtual_path:
                    continue

                # Make sure the parent directory chain exists first.
                dir_path = os.path.dirname(virtual_path)
                if dir_path != '/':
                    self._ensure_directory_exists(dir_path)

                self._create_or_update_file(virtual_path, file_info, resource)

            with self.stats_lock:
                self.import_stats['total_processed'] += 1

        except Exception as e:
            logger.error(f"处理资源 {resource.share_code} 时发生错误: {e}")
            raise

    def _ensure_directory_exists(self, dir_path: str):
        """Create dir_path in the VFS if needed, caching known directories."""
        with self.directory_cache_lock:
            if dir_path in self.created_directories:
                return

            try:
                vfs = self._get_vfs()
                if not vfs.exists(dir_path):
                    if vfs.create_folder(dir_path):
                        self.created_directories.add(dir_path)
                        logger.debug(f"创建目录: {dir_path}")
                    else:
                        logger.warning(f"创建目录失败: {dir_path}")
                else:
                    # Already present in the VFS; remember it.
                    self.created_directories.add(dir_path)
            except Exception as e:
                logger.error(f"检查创建目录 {dir_path} 失败: {e}")

    def _create_or_update_file(self, virtual_path: str, file_info: Dict, resource: "CloudResourcesCopy1"):
        """Create the file at virtual_path, or update it if it already exists."""
        try:
            vfs = self._get_vfs()

            # Best-effort lookup of an existing file; a failed lookup simply
            # routes us to the create path. (Was a silent bare `except: pass`.)
            existing_file = None
            try:
                with get_session() as session:
                    existing_file = vfs._find_file_by_path(session, virtual_path)
            except Exception as e:
                logger.debug(f"查找文件 {virtual_path} 失败: {e}")

            storage_id = self._create_storage_record(file_info, resource)
            if not storage_id:
                logger.warning(f"创建存储记录失败: {virtual_path}")
                return

            file_size = file_info.get('fileSize', file_info.get('size', 0))

            if existing_file:
                # Update metadata of the existing file in place.
                success = vfs.update_file_metadata(virtual_path, {
                    'storage_file_id': storage_id,
                    'file_size': file_size
                })
                if success:
                    with self.stats_lock:
                        self.import_stats['updated_imports'] += 1
                    logger.debug(f"更新文件: {virtual_path}")
            else:
                # Create a brand-new file entry under its parent directory.
                success = vfs.create_file(
                    parent_path=os.path.dirname(virtual_path),
                    file_name=os.path.basename(virtual_path),
                    storage_file_id=storage_id,
                    file_size=file_size
                )

                if success:
                    with self.stats_lock:
                        self.import_stats['new_imports'] += 1
                    logger.debug(f"创建文件: {virtual_path}")
                else:
                    logger.warning(f"创建文件失败: {virtual_path}")

        except Exception as e:
            logger.error(f"创建/更新文件 {virtual_path} 失败: {e}")

    def _create_storage_record(self, file_info: Dict, resource: "CloudResourcesCopy1") -> Optional[str]:
        """Create (or reuse) a VirtualStorage row and return its file_id.

        The id is the MD5 of share_code + file name + size, so the same
        shared file always maps to the same storage record. Returns None on
        failure.
        """
        try:
            import hashlib  # local import kept from the original

            # Deterministic id from the share and file identity.
            file_name = file_info.get('fileName', file_info.get('name', ''))
            file_size = file_info.get('fileSize', file_info.get('size', 0))
            file_id_content = f"{resource.share_code}_{file_name}_{file_size}"
            file_id = hashlib.md5(file_id_content.encode('utf-8')).hexdigest()

            with get_session() as session:
                # Reuse an existing record when present.
                existing = session.query(VirtualStorage).filter_by(file_id=file_id).first()
                if existing:
                    return file_id

                file_extension = os.path.splitext(file_name)[1].lower().lstrip('.')

                storage = VirtualStorage(
                    file_id=file_id,
                    original_filename=file_name,
                    file_size_bytes=int(file_size) if file_size else 0,
                    media_type=self._get_media_type(file_extension),
                    file_extension=file_extension,
                    # NOTE(review): provider is hard-coded while
                    # resource.clouds_type varies — confirm this is intended.
                    cloud_provider='tianyi',
                    cloud_file_id=file_info.get('fileId', file_info.get('id', '')),
                    status=1
                )

                session.add(storage)
                session.commit()

                return file_id

        except Exception as e:
            logger.error(f"创建存储记录失败: {e}")
            return None

    def _get_media_type(self, file_extension: str) -> int:
        """Map a file extension (without dot) to a media type code; 0 = unknown."""
        if not file_extension:
            return 0
        return self._MEDIA_TYPES.get(file_extension.lower(), 0)

def main():
    """Script entry point: parse CLI options, run the importer, print a summary.

    Returns 0 on success, 1 when the import raised an exception.
    """
    import argparse

    parser = argparse.ArgumentParser(description='云盘资源导入器')
    parser.add_argument('--limit', type=int, help='限制导入数量')
    parser.add_argument('--offset', type=int, default=0, help='偏移量')
    parser.add_argument('--workers', type=int, help='线程数')
    parser.add_argument('--batch-size', type=int, help='批量大小')
    args = parser.parse_args()

    # Build the importer from CLI options (None falls back to config defaults).
    importer = CloudImporter(max_workers=args.workers, batch_size=args.batch_size)

    try:
        stats = importer.import_resources(limit=args.limit, offset=args.offset)

        # Human-readable summary of the run.
        print(f"导入完成:")
        summary = (
            ('总处理数', 'total_processed'),
            ('新导入', 'new_imports'),
            ('更新', 'updated_imports'),
            ('跳过', 'skipped'),
            ('错误', 'errors'),
        )
        for label, key in summary:
            print(f"  {label}: {stats[key]}")
    except Exception as e:
        logger.error(f"导入失败: {e}")
        return 1

    return 0

if __name__ == '__main__':
    # Use sys.exit so the exit code propagates reliably; the builtin exit()
    # is injected by the site module for interactive use and may be absent
    # when Python runs with -S.
    sys.exit(main())