import logging
import time
import os
import threading
from flask import Blueprint, jsonify
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

from app.utils.spi_file_parser import SPIFileParser
from app.config.paths import SMT1_SPI_PATH, SMT2_SPI_PATH

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
logger = logging.getLogger(__name__)

# Flask blueprint exposing the SMT SPI analysis endpoints
smt_spi_analysis_bp = Blueprint('smt_spi_analysis', __name__)

# Network share paths to monitor (read from config; Windows UNC compatible)
SMT1_NETWORK_PATH = SMT1_SPI_PATH
SMT2_NETWORK_PATH = SMT2_SPI_PATH

# Module-level shared state (accessed from the Flask request threads and
# the background monitor/poll threads; guarded by the locks below)
smt1_current_model = None  # current product model on the SMT1 line
smt2_current_model = None  # current product model on the SMT2 line
smt1_data_cache = []  # parsed SPI records for SMT1 (newest last)
smt2_data_cache = []  # parsed SPI records for SMT2 (newest last)
smt1_cache_lock = threading.Lock()  # protects smt1_data_cache / smt1_current_model
smt2_cache_lock = threading.Lock()  # protects smt2_data_cache / smt2_current_model
MAX_CACHE_SIZE = 25  # maximum number of records kept per line

# De-duplication bookkeeping for already-processed files
processed_files_lock = threading.Lock()
smt1_processed_files = set()  # paths of files already processed for SMT1
smt2_processed_files = set()  # paths of files already processed for SMT2
MAX_PROCESSED_FILES = 10000  # cap on the size of each processed-file set

@smt_spi_analysis_bp.route('/api/smt1/spi/data', methods=['GET'])
def get_smt1_data():
    """Return every currently cached SPI record for the SMT1 line as JSON."""
    with smt1_cache_lock:
        snapshot = list(smt1_data_cache)
    return jsonify(snapshot)

@smt_spi_analysis_bp.route('/api/smt2/spi/data', methods=['GET'])
def get_smt2_data():
    """Return every currently cached SPI record for the SMT2 line as JSON."""
    with smt2_cache_lock:
        snapshot = list(smt2_data_cache)
    return jsonify(snapshot)

class SPIFileHandler(FileSystemEventHandler):
    """Watchdog handler that parses newly created SPI CSV files and feeds
    the per-line (SMT1/SMT2) in-memory caches.

    Fix vs. previous version: the processed-file sets are now trimmed IN
    PLACE instead of being rebound to a new set object. Rebinding the
    module global left any thread holding the old reference (e.g. the
    poll thread) with a permanently stale dedup set.
    """

    def __init__(self, line_id, target_model=None):
        """line_id is the line identifier ("SMT1" or "SMT2");
        target_model is forwarded to the parser (optional filter)."""
        self.parser = SPIFileParser(target_model)
        self.line_id = line_id  # line identifier: "SMT1" or "SMT2"

    def on_created(self, event):
        """Watchdog entry point: handle a newly created CSV file."""
        if event.is_directory:
            return
        file_path = event.src_path
        # Only CSV files are SPI result files
        if not file_path.endswith('.csv'):
            return

        processed_files = smt1_processed_files if self.line_id == "SMT1" else smt2_processed_files

        # Skip files we have already processed.
        # NOTE(review): the file is only marked processed after a successful
        # parse, so this check can race with the poll thread and — in the
        # worst case — produce a duplicate cache entry; confirm whether
        # that is acceptable before marking earlier.
        with processed_files_lock:
            if file_path in processed_files:
                return

        logger.info(f"[{self.line_id}] 检测到新文件: {file_path}")
        self.process_file(file_path)

    def process_file(self, file_path):
        """Parse one SPI CSV file and update this line's cache.

        Any exception is logged and swallowed so that one bad file cannot
        kill the monitoring/poll thread.
        """
        global smt1_current_model, smt2_current_model

        try:
            # Parse the file into (model name, aggregated statistics)
            model_name, structured_data = self.parser.process_file(file_path)

            if not structured_data:
                logger.warning(f"[{self.line_id}] 文件解析结果为空: {file_path}")
                return

            self._print_summary(file_path, model_name, structured_data)
            cache_data = self._build_cache_entry(file_path, model_name, structured_data)

            # Update the cache for the correct line; the helper returns the
            # (possibly changed) current model so the global stays accurate.
            if self.line_id == "SMT1":
                smt1_current_model = self._update_line_cache(
                    smt1_cache_lock, smt1_data_cache, smt1_current_model,
                    model_name, cache_data)
                self._mark_processed(smt1_processed_files, file_path)
            else:  # SMT2
                smt2_current_model = self._update_line_cache(
                    smt2_cache_lock, smt2_data_cache, smt2_current_model,
                    model_name, cache_data)
                self._mark_processed(smt2_processed_files, file_path)
        except Exception as e:
            logger.error(f"[{self.line_id}] 处理文件出错: {file_path}, 错误: {str(e)}")

    def _print_summary(self, file_path, model_name, structured_data):
        """Print a human-readable parse summary to stdout (debug aid)."""
        print(f"\n===============[{self.line_id}]文件解析结果===============")
        print(f"文件名: {os.path.basename(file_path)}")
        print(f"机种: {model_name}")
        print(f"板子ID: {structured_data['id']}")
        print(f"平均厚度: {structured_data['height_avg']:.4f}")
        print(f"平均面积: {structured_data['area_avg']:.4f}")
        print(f"平均体积: {structured_data['volume_avg']:.4f}")
        print(f"厚度标准差: {structured_data['height_std']:.4f}")
        print(f"面积标准差: {structured_data['area_std']:.4f}")
        print(f"体积标准差: {structured_data['volume_std']:.4f}")
        print(f"===============结束===============\n")

    def _build_cache_entry(self, file_path, model_name, structured_data):
        """Build the JSON-serializable record stored in the line cache."""
        return {
            'id': structured_data['id'],
            'model': model_name,
            'line_id': self.line_id,
            'height_avg': float(structured_data['height_avg']),
            'area_avg': float(structured_data['area_avg']),
            'volume_avg': float(structured_data['volume_avg']),
            'height_std': float(structured_data['height_std']),
            'area_std': float(structured_data['area_std']),
            'volume_std': float(structured_data['volume_std']),
            'timestamp': time.strftime("%Y-%m-%d %H:%M:%S"),
            'filename': os.path.basename(file_path)
        }

    def _update_line_cache(self, lock, cache, current_model, model_name, cache_data):
        """Append cache_data to cache under lock, clearing the cache first
        when the model changed, and cap the cache at MAX_CACHE_SIZE.
        Returns the model name that is now current."""
        with lock:
            # A new model means old boards are no longer comparable: reset.
            if current_model != model_name:
                logger.info(f"[{self.line_id}] 检测到新机种: {model_name}，清空缓存")
                cache.clear()
            cache.append(cache_data)
            # Drop the oldest record once the cap is exceeded.
            if len(cache) > MAX_CACHE_SIZE:
                cache.pop(0)
        return model_name

    def _mark_processed(self, processed_files, file_path):
        """Record file_path as processed and bound the set's size.

        The set is trimmed IN PLACE (set.pop) so every thread sharing the
        same set object keeps a valid reference. Python sets are unordered,
        so the trimmed 20% is arbitrary, not "the oldest".
        """
        with processed_files_lock:
            processed_files.add(file_path)
            if len(processed_files) > MAX_PROCESSED_FILES:
                files_to_remove = int(MAX_PROCESSED_FILES * 0.2)
                for _ in range(files_to_remove):
                    processed_files.pop()

def poll_directory(path, line_id, handler, interval=10):
    """Periodically scan *path* for unprocessed CSV files and hand each one
    to *handler*.process_file.

    This complements the watchdog observer, which can miss create events
    (e.g. on network shares). Runs forever; intended for a daemon thread.

    Args:
        path: directory to scan.
        line_id: "SMT1" or "SMT2"; selects the dedup set.
        handler: SPIFileHandler instance that performs the parsing.
        interval: seconds to sleep between scans.

    Fixes vs. previous version: the startup message was logged twice and
    the docstring sat after a statement (making it a no-op expression);
    an unused last_scan_time variable was dropped; the dedup set is now
    re-read from the module global on every pass instead of captured once,
    so a rebound global can never leave this thread with a stale set.
    """
    logger.info(f"[{line_id}] 启动轮询线程，间隔{interval}秒")

    while True:
        try:
            # Re-read the module-level set each pass: other code may replace
            # it, and a stale local reference would defeat deduplication.
            processed_files = smt1_processed_files if line_id == "SMT1" else smt2_processed_files

            # The network share may be temporarily unreachable.
            if not os.path.exists(path):
                logger.error(f"[{line_id}] 目录不存在: {path}")
                time.sleep(interval)
                continue

            # Collect (path, mtime) for every readable CSV file.
            files_to_check = []
            try:
                for filename in os.listdir(path):
                    if not filename.endswith('.csv'):
                        continue

                    file_path = os.path.join(path, filename)
                    if not os.path.isfile(file_path):
                        continue

                    try:
                        mod_time = os.path.getmtime(file_path)
                    except OSError:
                        continue  # file vanished or is inaccessible; skip

                    files_to_check.append((file_path, mod_time))
            except OSError as e:
                logger.error(f"[{line_id}] 轮询目录出错: {str(e)}")
                time.sleep(interval)
                continue

            # Newest files first, and cap the batch so one pass stays bounded.
            files_to_check.sort(key=lambda item: item[1], reverse=True)
            max_files_per_scan = 20  # at most 20 files per poll pass
            processed_count = 0

            for file_path, _ in files_to_check[:max_files_per_scan]:
                # Skip files that were already processed.
                with processed_files_lock:
                    if file_path in processed_files:
                        continue

                logger.info(f"[{line_id}] 轮询检测到文件: {file_path}")
                handler.process_file(file_path)
                processed_count += 1

            if processed_count > 0:
                logger.info(f"[{line_id}] 本次轮询处理了 {processed_count} 个文件")

            time.sleep(interval)

        except Exception as e:
            # Never let the poll thread die: log and try again next cycle.
            logger.error(f"[{line_id}] 轮询出错: {str(e)}")
            time.sleep(interval)

def monitor_directory(path, line_id):
    """Monitor *path* for new SPI CSV files, via watchdog events plus a
    backup poll thread. Blocks forever; intended for a daemon thread.

    Args:
        path: directory to watch (created if missing).
        line_id: "SMT1" or "SMT2".

    Fix vs. previous version: the observer is stopped and joined in a
    ``finally`` block, so it no longer leaks when an exception other than
    KeyboardInterrupt escapes the wait loop.
    """
    try:
        # Make sure the target directory exists before watching it.
        if not os.path.exists(path):
            os.makedirs(path, exist_ok=True)
            logger.info(f"[{line_id}] 创建目录: {path}")

        event_handler = SPIFileHandler(line_id)
        observer = Observer()

        observer.schedule(event_handler, path, recursive=False)
        observer.start()

        logger.info(f"[{line_id}] 开始监控目录: {path}")
        logger.info(f"[{line_id}] 最大缓存条数: {MAX_CACHE_SIZE}")

        # Backup poll thread: catches files the observer misses
        # (common on network shares).
        poll_thread = threading.Thread(
            target=poll_directory,
            args=(path, line_id, event_handler, 10)  # poll every 10 seconds
        )
        poll_thread.daemon = True
        poll_thread.start()

        # Keep this thread alive; the observer/poll threads do the work.
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info(f"[{line_id}] 监控已停止")
        finally:
            # Always shut the observer down cleanly, whatever unwinds us.
            observer.stop()
            observer.join()

    except Exception as e:
        logger.error(f"[{line_id}] 监控出错: {str(e)}")

def start_monitoring():
    """Spawn one daemon monitoring thread per SMT line (SMT1 and SMT2)."""
    line_configs = (
        (SMT1_NETWORK_PATH, "SMT1"),
        (SMT2_NETWORK_PATH, "SMT2"),
    )
    for network_path, line_id in line_configs:
        worker = threading.Thread(
            target=monitor_directory,
            args=(network_path, line_id)
        )
        worker.daemon = True
        worker.start()

_monitoring_started = False  # set once the monitor threads have been spawned
_monitoring_start_lock = threading.Lock()  # serializes the one-time startup

# Automatically start monitoring once the blueprint receives its first request
@smt_spi_analysis_bp.before_app_request
def before_request():
    """Start the background monitors exactly once, on the first request.

    The lock closes a check-then-set race: under a multi-threaded WSGI
    server, two concurrent first requests could both observe the flag
    unset and each start a duplicate set of monitor threads.
    """
    global _monitoring_started
    with _monitoring_start_lock:
        if not _monitoring_started:
            _monitoring_started = True
            start_monitoring()
