# Python standard library
import contextlib
import hashlib
import mmap
import os
import random
import re
import resource
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import traceback
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional, List, Dict, Tuple, Any, Set

# Third-party libraries
import psutil
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

# Config: input directories to scan (multiple roots supported)
INPUT_DIRS = [
    Path("/服务器/"),
    #Path("/服务器/3.后勤部门"),
    #Path("/服务器/4.安全部门"),
    # Add more paths here...
]
MAX_RETRIES = 2  # max retries per file
MAX_ES_RETRIES = 3  # max retries for Elasticsearch operations
ES_RETRY_DELAY = 5  # delay between ES retries (seconds)
FILE_HANDLE_LIMIT = 65536  # desired soft limit for open file descriptors
LARGE_FILE_THRESHOLD = 10 * 1024 * 1024  # 10 MB: files above this are hashed via mmap

# Elasticsearch configuration
ES_HOST = "https://localhost:9200"
# NOTE(review): hard-coded credential committed in source — should be loaded
# from an environment variable or a secrets store instead.
ES_API_KEY = "T0lqM2RKWUI4clBKeFVBenFaZGc6MTBWZmU3d3VSX0dHQy1iNjBfbHRyUQ=="
ES_CA_CERTS = "/srv/elasticsearch-8.12.2/config/certs/http_ca.crt"

# Exclusion patterns, matched case-insensitively against the full path
# ("*" is a wildcard; see scan_phase for how these are compiled)
EXCLUDE_DIRS = [
    "~$*",  # Office temporary-file pattern
    "*/temp/*",
    "*/tmp/*",
    "*/cache/*",
    "*/备份/*",
    "*/backup/*",
    "*/回收站/*",
    "*/Recycle Bin/*",
    #"*/0.资源库/*"
]

# Supported file extensions (lower-case; anything else is skipped)
SUPPORTED_EXTS = {
    '.xlsx', '.xls',   # Excel
    '.docx', '.doc',   # Word
    '.pptx', '.ppt',   # PowerPoint
    '.pdf',            # PDF
    '.html', '.htm',   # HTML
    '.txt',            # plain text
    '.jpg', '.jpeg',   # images
    '.png', '.gif', '.bmp',  # images
    '.mp3', '.wma', '.amr',  # audio
    '.mp4', '.mov', '.swf'   # video
}

class ScanPhaseProcessor:
    """Scan the configured input trees, de-duplicate files by MD5 hash and
    index one document per unique hash into Elasticsearch.

    Only the "scan" phase lives here: documents are created with
    status="pending" and no content, presumably for a later extraction phase
    to fill in (the mapping has a ``content`` field this class never writes).
    """

    def __init__(self):
        # Set by the signal handlers; every long-running loop checks it.
        self.shutdown_flag = False
        self.setup_signal_handlers()
        self.set_file_handle_limit()
        self.es_client = self.init_elasticsearch()
        self.file_hash_index_name = "file_hash"
        self.setup_elasticsearch_index()
        self.md5_to_primary = {}       # md5 -> path of the primary copy
        self.duplicate_files_map = {}  # md5 -> list of duplicate paths
        self.global_inode_cache = {}   # (ino, dev, size) -> (mtime, md5)
        self.seen_inodes = set()       # inodes already visited (hard-link dedup)
        self.active_skip_count = 0     # files skipped as hard-link repeats
        self.processed_paths = set()   # every path already recorded in ES

    def set_file_handle_limit(self):
        """Raise the soft RLIMIT_NOFILE towards FILE_HANDLE_LIMIT (best effort)."""
        try:
            soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
            if soft < FILE_HANDLE_LIMIT:
                # The soft limit can never exceed the hard limit.
                new_soft = min(FILE_HANDLE_LIMIT, hard)
                resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
        except Exception as e:
            print(f"无法设置文件描述符限制: {str(e)}")

    def setup_signal_handlers(self):
        """Route SIGTERM and SIGINT to handle_shutdown."""
        signal.signal(signal.SIGTERM, self.handle_shutdown)
        signal.signal(signal.SIGINT, self.handle_shutdown)

    def handle_shutdown(self, signum, frame):
        """Signal handler: flag shutdown, pause briefly, then exit.

        NOTE(review): sys.exit() raises SystemExit right here, so the
        shutdown_flag checks elsewhere only matter for code that runs while
        the interpreter unwinds — confirm the hard exit is intended.
        """
        self.shutdown_flag = True
        print("\n接收到关闭信号，正在清理资源...")
        time.sleep(1)  # give in-flight output a moment to flush
        sys.exit(1)

    def init_elasticsearch(self):
        """Create and ping the Elasticsearch client.

        Returns:
            A connected ``Elasticsearch`` client.

        Raises:
            RuntimeError: if the client cannot be built or the ping fails.
        """
        try:
            es = Elasticsearch(
                hosts=[ES_HOST],
                api_key=ES_API_KEY,
                ca_certs=ES_CA_CERTS,
                request_timeout=60,
                retry_on_timeout=True,
                max_retries=3
            )
            if not es.ping():
                raise RuntimeError("无法连接到Elasticsearch")
            return es
        except Exception as e:
            raise RuntimeError(f"Elasticsearch初始化失败: {str(e)}")

    def setup_elasticsearch_index(self):
        """Create the file-hash index (or refresh its mapping if it exists)."""
        if self.shutdown_flag:
            return

        # File-hash index: one document per unique MD5, with full metadata.
        file_hash_mapping = {
            "settings": {
                "number_of_shards": 1,
                "number_of_replicas": 0
            },
            "mappings": {
                "dynamic": "strict",
                "properties": {
                    "md5_hash": {"type": "keyword", "doc_values": True},
                    "primary_file": {"type": "keyword"},  # primary file path
                    "duplicate_files": {"type": "keyword"},  # array of duplicate paths
                    "file_ext": {"type": "keyword"},
                    "modified_date": {"type": "date"},
                    "status": {"type": "keyword"},
                    "error_message": {"type": "text", "index": False},
                    "retry_count": {"type": "short"},
                    "is_duplicate": {"type": "boolean"},  # True for duplicate copies
                    "content": {
                        "type": "text",
                        "analyzer": "ik_max_word",
                        "search_analyzer": "ik_smart"
                    },
                    "lock_time": {"type": "date"}
                }
            }
        }

        if not self.es_client.indices.exists(index=self.file_hash_index_name):
            # ignore_status=400 makes a concurrent "already exists" harmless.
            self.es_client.options(ignore_status=400).indices.create(
                index=self.file_hash_index_name,
                body=file_hash_mapping
            )
        else:
            # Best-effort mapping refresh; conflicting changes are ignored.
            try:
                self.es_client.indices.put_mapping(
                    index=self.file_hash_index_name,
                    body=file_hash_mapping["mappings"]
                )
            except Exception:  # was a bare except: — keep best-effort semantics
                pass

    @contextlib.contextmanager
    def es_connection(self):
        """Yield the shared Elasticsearch client.

        BUGFIX: the previous implementation yielded inside a retry loop. A
        ``@contextmanager`` generator may yield exactly once: when the
        with-body raised, contextlib threw the exception into the generator,
        the ``except`` swallowed it and the loop yielded again, so every
        failure surfaced as ``RuntimeError("generator didn't stop after
        throw()")`` instead of being retried. A with-body cannot be re-run
        from inside a context manager, so retries are the caller's job
        (index_single_document already has its own retry loop).
        """
        if self.shutdown_flag:
            # Mirrors the old behaviour of refusing to operate during shutdown.
            raise RuntimeError("ES操作超过最大重试次数: None")
        yield self.es_client

    def compute_md5(self, file_path: Path) -> str:
        """Return the hex MD5 digest of *file_path*.

        Files larger than LARGE_FILE_THRESHOLD are hashed through an mmap
        view; smaller files are read in 128 KiB chunks.

        Raises:
            RuntimeError: wrapping any I/O or mmap failure.
        """
        file_size = file_path.stat().st_size

        if file_size > LARGE_FILE_THRESHOLD:
            # Large file: memory-map to avoid double-buffering the read.
            try:
                with open(file_path, "rb") as f:
                    with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
                        return hashlib.md5(mm).hexdigest()
            except Exception as e:
                raise RuntimeError(f"内存映射MD5计算失败: {str(e)}")
        else:
            # Small file: chunked read.
            hash_md5 = hashlib.md5()
            try:
                with file_path.open("rb") as f:
                    for chunk in iter(lambda: f.read(128 * 1024), b""):
                        hash_md5.update(chunk)
                return hash_md5.hexdigest()
            except Exception as e:
                raise RuntimeError(f"MD5计算失败: {str(e)}")

    def scan_phase(self):
        """Phase 1: scan every directory in INPUT_DIRS and index the results."""
        if self.shutdown_flag:
            return

        start_time = time.time()
        print(f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] 开始扫描阶段...")

        # 1. Load existing ES records into memory (md5 -> record), so already
        #    indexed paths can be skipped cheaply during the walk.
        existing_records = {}  # {md5_hash: record_dict}
        try:
            with self.es_connection() as es:
                scroll_size = 1000
                body = {
                    "query": {"match_all": {}},
                    "_source": ["md5_hash", "primary_file", "duplicate_files", "status",
                              "error_message", "retry_count", "modified_date",
                              "is_duplicate"]
                }
                res = es.search(
                    index=self.file_hash_index_name,
                    scroll="2m",
                    size=scroll_size,
                    body=body
                )
                scroll_id = res["_scroll_id"]
                # NOTE(review): without track_total_hits this count may be
                # capped at 10000 — it is only used for logging.
                total_hits = res["hits"]["total"]["value"]
                print(f"从ES加载了 {total_hits} 条现有记录")

                while res["hits"]["hits"] and not self.shutdown_flag:
                    for hit in res["hits"]["hits"]:
                        doc = hit["_source"]
                        md5 = doc["md5_hash"]
                        existing_records[md5] = doc

                        # Rebuild the md5 -> primary-file mapping.
                        if not doc.get("is_duplicate", True):
                            self.md5_to_primary[md5] = doc["primary_file"]

                        # Primary path counts as processed...
                        self.processed_paths.add(doc["primary_file"])

                        # ...and so does every recorded duplicate path.
                        for dup_path in doc.get("duplicate_files", []):
                            self.processed_paths.add(dup_path)

                    res = es.scroll(scroll_id=scroll_id, scroll="2m")
                es.clear_scroll(scroll_id=scroll_id)
        except Exception as e:
            print(f"加载已有记录失败: {str(e)}")

        # 2. Walk the filesystem.
        total_files = 0
        excluded_count = 0
        md5_dict = {}  # md5 -> occurrence count
        all_files = []
        skipped_files = 0  # already present in ES
        skipped_unsupported = 0  # unsupported extension

        # Pre-compile the exclusion rules once for the whole scan.
        exclude_patterns = []
        for pattern in EXCLUDE_DIRS:
            pattern = pattern.replace("\\", "/").lower()
            if pattern == "~$*":
                exclude_patterns.append(("office_temp", None))
                continue
            if "*" in pattern:
                regex_pattern = re.escape(pattern)
                regex_pattern = regex_pattern.replace(r"\*", ".*")
                regex_pattern = f"^{regex_pattern}$"
                exclude_patterns.append(("regex", re.compile(regex_pattern, re.IGNORECASE)))
            else:
                exclude_patterns.append(("exact", pattern.lower()))

        # Scan every configured root.
        for input_dir in INPUT_DIRS:
            print(f"扫描目录: {input_dir}")
            if not input_dir.exists():
                print(f"警告: 目录不存在 - {input_dir}")
                continue

            result = self.scan_directory(input_dir, exclude_patterns)
            all_files.extend(result["files"])
            excluded_count += result["excluded"]
            skipped_files += result["skipped_files"]
            skipped_unsupported += result["skipped_unsupported"]
            self.active_skip_count += result["active_skips"]  # hard-link skips

            # Merge per-directory MD5 counts into the global tally.
            for md5, count in result["md5_dict"].items():
                md5_dict[md5] = md5_dict.get(md5, 0) + count

        total_files = len(all_files) + skipped_files + excluded_count + skipped_unsupported + self.active_skip_count
        print(f"扫描完成: 总文件={total_files}, 排除={excluded_count}, 跳过已处理={skipped_files}, "
              f"跳过未支持={skipped_unsupported}, 主动跳过={self.active_skip_count}, 待处理={len(all_files)}")

        # 3. Process each file and index it.
        processed = 0
        success_count = 0
        failure_count = 0

        # Nothing to do — bail out early.
        if not all_files:
            print("没有需要处理的文件")
            return

        # Sequential processing, one file at a time.
        for file_path in all_files:
            if self.shutdown_flag:
                break

            processed += 1
            try:
                if self._process_and_index_single_file(file_path):
                    success_count += 1
                else:
                    failure_count += 1
            except Exception as e:
                print(f"文件处理出错: {str(e)}")
                failure_count += 1

            # Progress report every 2000 files (the old comment said 100).
            if processed % 2000 == 0:
                print(f"已处理 {processed}/{len(all_files)} 文件, 成功={success_count}, 失败={failure_count}")

        # Final summary.
        total_time = time.time() - start_time
        self._show_scan_summary(
            total_files=total_files,
            excluded_count=excluded_count,
            skipped_files=skipped_files,
            skipped_unsupported=skipped_unsupported,
            active_skips=self.active_skip_count,
            elapsed_seconds=total_time,
            success_count=success_count,
            failure_count=failure_count,
            md5_dict=md5_dict
        )

    def scan_directory(self, root_dir: Path, exclude_patterns: list):
        """Recursively scan one directory tree.

        Args:
            root_dir: directory to walk.
            exclude_patterns: pre-compiled rules as produced by scan_phase —
                tuples of ("office_temp" | "regex" | "exact", pattern).

        Returns:
            Dict with the files to process plus per-category skip counters
            and a local md5 -> count map.
        """
        local_files = []  # files that still need processing
        excluded = 0
        skipped_files = 0
        skipped_unsupported = 0
        active_skips = 0  # hard-link skips
        local_md5_dict = {}

        for file_path in root_dir.rglob("*"):
            if self.shutdown_flag:
                break

            if not file_path.is_file():
                continue

            # Normalised absolute path (forward slashes).
            abs_path = str(file_path.absolute()).replace("\\", "/")

            # Extension filter.
            file_ext = file_path.suffix.lower()
            if file_ext not in SUPPORTED_EXTS:
                skipped_unsupported += 1
                continue

            # Exclusion rules.
            file_name = file_path.name
            file_name_lower = file_name.lower()
            full_path = str(file_path).replace("\\", "/")
            should_exclude = False

            for match_type, pattern in exclude_patterns:
                if match_type == "office_temp" and file_name_lower.startswith("~$"):
                    should_exclude = True
                    break
                elif match_type == "exact" and full_path.lower() == pattern:
                    should_exclude = True
                    break
                elif match_type == "regex" and pattern.search(full_path.lower()):
                    should_exclude = True
                    break

            if should_exclude:
                excluded += 1
                continue

            # Fast path: skip paths already recorded in ES.
            if abs_path in self.processed_paths:
                skipped_files += 1
                continue

            try:
                st = file_path.stat()
                file_size = st.st_size
                inode_key = (st.st_ino, st.st_dev, file_size)

                # Hard-link dedup: an inode already seen this run is skipped.
                if inode_key in self.seen_inodes:
                    active_skips += 1
                    continue
                self.seen_inodes.add(inode_key)

                # Reuse a cached MD5 if the mtime has not changed.
                md5 = None
                if inode_key in self.global_inode_cache:
                    cached_mtime, cached_md5 = self.global_inode_cache[inode_key]
                    if cached_mtime == st.st_mtime:
                        md5 = cached_md5

                # Cache miss (or stale): compute and cache the hash.
                if md5 is None:
                    try:
                        md5 = self.compute_md5(file_path)
                    except Exception as e:
                        print(f"MD5计算失败: {file_path} - {str(e)}")
                        continue

                    self.global_inode_cache[inode_key] = (st.st_mtime, md5)

                # Tally this MD5.
                local_md5_dict[md5] = local_md5_dict.get(md5, 0) + 1

                # Decide whether this path is the primary copy or a duplicate.
                if md5 in self.md5_to_primary:
                    primary_path = self.md5_to_primary[md5]
                    if abs_path != primary_path:
                        # Record as a duplicate of the known primary.
                        self.duplicate_files_map.setdefault(md5, []).append(abs_path)
                else:
                    # First time this MD5 is seen: this path becomes primary.
                    self.md5_to_primary[md5] = abs_path
                    self.duplicate_files_map[md5] = []

                local_files.append(file_path)

            except Exception as e:
                print(f"文件处理错误: {file_path} - {str(e)}")
                continue

        return {
            "files": local_files,
            "excluded": excluded,
            "skipped_files": skipped_files,
            "skipped_unsupported": skipped_unsupported,
            "active_skips": active_skips,
            "md5_dict": local_md5_dict
        }

    def _show_scan_summary(self, total_files: int, excluded_count: int,
                          skipped_files: int, skipped_unsupported: int,
                          active_skips: int,
                          elapsed_seconds: float = 0.0,
                          success_count: int = 0, failure_count: int = 0,
                          md5_dict: dict = None):
        """Print a right-aligned summary table of the scan statistics."""

        # Duplicates: for each MD5 seen more than once, every copy beyond the
        # first counts as a duplicate.
        duplicate_files = sum(count-1 for count in md5_dict.values() if count > 1) if md5_dict else 0

        # Files actually processed this run.
        processed_files = total_files - skipped_files - skipped_unsupported - excluded_count - active_skips

        # Elapsed time as HH:MM:SS.
        hours = int(elapsed_seconds // 3600)
        minutes = int((elapsed_seconds % 3600) // 60)
        seconds = int(elapsed_seconds % 60)
        time_str = f"{hours:02d}:{minutes:02d}:{seconds:02d}"

        # Column width for the numeric fields.
        num_width = 12

        print("\n" + "="*60)
        print("扫描统计摘要".center(60))
        print("="*60)

        # Filesystem statistics.
        print("[文件系统统计]")
        print(f"  • 总文件数:        {total_files:>{num_width},}")
        print(f"  • 排除文件:        {excluded_count:>{num_width},} (符合排除规则)")
        print(f"  • 跳过已处理:      {skipped_files:>{num_width},} (已在ES中存在)")
        print(f"  • 跳过未支持:      {skipped_unsupported:>{num_width},} (不支持的扩展名)")
        print(f"  • 主动跳过:        {active_skips:>{num_width},} (硬链接/其他原因)")
        print(f"  • 实际处理:        {processed_files:>{num_width},}")
        print(f"     ├─ 成功提交:    {success_count:>{num_width},}")
        print(f"     └─ 失败提交:    {failure_count:>{num_width},}")
        print(f"  • 重复文件数:      {duplicate_files:>{num_width},} (相同MD5)")

        # Elapsed time, at the bottom of the summary.
        print("\n[耗时统计]")
        print(f"  • 总耗时:          {time_str:>{num_width}}")
        print("="*60)

    def _process_and_index_single_file(self, file_path):
        """Build the ES document for one file and index it.

        Returns:
            True on successful indexing, False otherwise (errors are printed,
            never raised, so the caller's loop can continue).
        """
        try:
            st = file_path.stat()

            # Guard against timestamps outside datetime's representable range.
            try:
                file_mtime = datetime.fromtimestamp(st.st_mtime)
            except (ValueError, OSError):
                # Fall back to a fixed date (2000-01-01).
                file_mtime = datetime(2000, 1, 1)

            full_path = str(file_path).replace("\\", "/")

            # Prefer the MD5 cached during the scan pass.
            inode_key = (st.st_ino, st.st_dev, st.st_size)
            if inode_key in self.global_inode_cache:
                _, md5 = self.global_inode_cache[inode_key]
            else:
                # Cache miss: recompute and remember it for later callers.
                md5 = self.compute_md5(file_path)
                self.global_inode_cache[inode_key] = (st.st_mtime, md5)

            # Resolve primary/duplicate relationship for this hash.
            is_duplicate = False
            primary_file = full_path
            duplicate_files = []

            if md5 in self.md5_to_primary:
                primary_file = self.md5_to_primary[md5]
                is_duplicate = (primary_file != full_path)
                duplicate_files = self.duplicate_files_map.get(md5, [])

            new_doc = {
                "md5_hash": md5,
                "primary_file": primary_file,
                "duplicate_files": duplicate_files,
                "file_ext": file_path.suffix.lower(),
                "modified_date": file_mtime,
                "is_duplicate": is_duplicate,
                "status": "pending",
                "error_message": None,
                "retry_count": 0,
            }

            # Index the single document directly.
            return self.index_single_document(new_doc)
        except Exception as e:
            print(f"文件处理失败: {file_path} - {str(e)}")
            return False

    def index_single_document(self, doc: Dict) -> bool:
        """Index one document into ES, retrying up to MAX_ES_RETRIES times.

        The document id is the MD5 hash, so re-indexing the same hash
        overwrites the previous document. Returns True on success.
        """
        if not doc or self.shutdown_flag:
            return False

        retries = 0
        while retries < MAX_ES_RETRIES and not self.shutdown_flag:
            try:
                with self.es_connection() as es:
                    # Per-request timeout via the client's options() API.
                    es.options(request_timeout=30).index(
                        index=self.file_hash_index_name,
                        id=doc["md5_hash"],
                        body=doc
                    )
                return True
            except Exception as e:
                print(f"文档索引失败 (重试 {retries+1}/{MAX_ES_RETRIES}): {str(e)}")
                retries += 1
                if retries < MAX_ES_RETRIES:
                    time.sleep(ES_RETRY_DELAY)
        return False

    def run(self):
        """Entry point: ensure the input roots exist, then run the scan phase."""
        try:
            # NOTE(review): this silently creates missing input directories —
            # confirm that is preferred over failing on a typo'd path.
            for input_dir in INPUT_DIRS:
                input_dir.mkdir(parents=True, exist_ok=True)

            self.scan_phase()
        except Exception as e:
            print(f"扫描阶段出错: {str(e)}")
            raise
            
def main():
    """Script entry point: build the processor and run the scan phase."""
    try:
        ScanPhaseProcessor().run()
    except KeyboardInterrupt:
        print("\n[提示] 用户中断操作")
    except Exception as e:
        print(f"\n[致命错误] {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()