import os
import re
import logging
from flask import Blueprint, request, jsonify, make_response
from flask_cors import cross_origin
from app.config.paths import AUTOMATION_EOL_ROOT

# Blueprint for the automation EOL file-browsing API (registered by the app factory).
automation_eol_bp = Blueprint('automation_eol', __name__)

# Basic logger (writes to the console). basicConfig is only called when this
# module's logger has no handlers attached, so an application-level logging
# configuration set up elsewhere is not clobbered on import.
logger = logging.getLogger(__name__)
if not logger.handlers:
    logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s %(name)s: %(message)s')


# Strict pattern for EOL result file names, e.g.
#   6608189000_OK_6608189000KX11-A3202508066910_B_???-??.csv
# Groups: numeric part number, OK/NG result, serial (no underscores),
# side A/B, and the remaining free text before the .csv suffix.
# Compiled once at module level: parse_filename is called per file while
# walking the directory tree, so recompiling per call is wasted work.
_FILENAME_RE = re.compile(
    r'^(?P<part>\d+)_(?P<result>OK|NG)_(?P<serial>[^_]+)_(?P<side>[AB])_(?P<other>.+)\.csv$'
)


def parse_filename(file_path):
    """Split a relative CSV path into its metadata fields.

    Args:
        file_path: Relative path such as
            '25-08-06/6608189000_OK_6608189000KX11-A3202508066908_B_???-??.csv'.
            The directory component is used as the 'date' field as-is.

    Returns:
        dict with keys 'date', 'part_no', 'result', 'serial_no', 'side',
        'other', 'file_path'. When the file name does not match the strict
        pattern, every field except 'date' and 'file_path' is None.
    """
    dir_name, file_name = os.path.split(file_path)
    # Start from the "no match" shape; fill in the groups only on success,
    # so both branches share a single dict definition.
    info = {
        'date': dir_name,
        'part_no': None,
        'result': None,
        'serial_no': None,
        'side': None,
        'other': None,
        'file_path': file_path,
    }
    match = _FILENAME_RE.match(file_name)
    if match:
        info.update(
            part_no=match.group('part'),
            result=match.group('result'),
            serial_no=match.group('serial'),
            side=match.group('side'),
            other=match.group('other'),
        )
    return info

@automation_eol_bp.route('/list_files', methods=['GET', 'OPTIONS'])
@cross_origin(supports_credentials=True)
def list_files():
    """List EOL CSV files under a directory tree, newest first.

    Query parameters:
        dir         -- root directory to walk (default AUTOMATION_EOL_ROOT).
        page        -- 1-based page number (default 1; invalid values fall back).
        page_size   -- entries per page (default 10; invalid values fall back).
        serial_no   -- exact serial-number filter ('sn' is accepted as an alias).
        side        -- 'A' or 'B' side filter (case-insensitive).
        only_latest -- '1'/'true'/'yes' keeps only the newest matching entry;
                       defaults to on when serial_no is given without side.

    Returns:
        JSON envelope {code, message, data}; data holds total, page,
        page_size, the parsed file entries, and the applied filters.
    """
    if request.method == 'OPTIONS':
        # Answer the CORS preflight explicitly so every header is controlled here.
        origin = request.headers.get('Origin')
        req_headers = request.headers.get('Access-Control-Request-Headers', 'Content-Type, Authorization')
        logger.info('Preflight OPTIONS /list_files from %s, req_headers=%s', origin, req_headers)
        resp = make_response('', 204)
        if origin:
            # Echo the origin (required when credentials are allowed).
            resp.headers['Access-Control-Allow-Origin'] = origin
            resp.headers['Vary'] = 'Origin'
        else:
            resp.headers['Access-Control-Allow-Origin'] = '*'
        resp.headers['Access-Control-Allow-Credentials'] = 'true'
        resp.headers['Access-Control-Allow-Methods'] = 'GET, OPTIONS'
        resp.headers['Access-Control-Allow-Headers'] = req_headers
        resp.headers['Access-Control-Max-Age'] = '86400'
        logger.info('Preflight /list_files respond headers: %s', dict(resp.headers))
        return resp

    def _int_arg(name, default, minimum=1):
        # Fix: int(request.args.get(...)) used to raise ValueError on junk
        # query input and surface as an HTTP 500; fall back to the default
        # instead, and floor at `minimum` so pagination math stays sane.
        try:
            value = int(request.args.get(name, default))
        except (TypeError, ValueError):
            value = default
        return max(value, minimum)

    # NOTE(review): 'dir' lets a client walk an arbitrary server directory;
    # consider restricting it to AUTOMATION_EOL_ROOT if callers don't need it.
    root_dir = request.args.get('dir', AUTOMATION_EOL_ROOT)
    page = _int_arg('page', 1)
    page_size = _int_arg('page_size', 10)
    serial_no = (request.args.get('serial_no') or request.args.get('sn') or '').strip()
    side_filter = (request.args.get('side') or '').strip().upper()
    only_latest = str(request.args.get('only_latest') or '').lower() in ('1', 'true', 'yes')
    # With a serial_no but no explicit only_latest and no side restriction,
    # default to returning just the newest record for that serial.
    if serial_no and (request.args.get('only_latest') is None) and not side_filter:
        only_latest = True
    logger.info('list_files called: root_dir=%s page=%s page_size=%s serial_no=%s side=%s only_latest=%s',
                root_dir, page, page_size, serial_no, side_filter, only_latest)

    # os.path.isdir already implies existence; one check is enough.
    if not os.path.isdir(root_dir):
        logger.warning('Root dir not exist or not dir: %s', root_dir)
        return jsonify({
            'code': 400,
            'message': f'目录不存在: {root_dir}',
            'data': None
        }), 400

    all_files = []
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        for f in filenames:
            # Cheap substring pre-filter on the raw file name before the
            # exact serial match below, to reduce the amount parsed.
            if serial_no and serial_no not in f:
                continue
            full_path = os.path.join(dirpath, f)
            rel_path = os.path.relpath(full_path, root_dir)
            try:
                mtime = os.path.getmtime(full_path)
            except FileNotFoundError:
                # File vanished between walk() and stat() (concurrent delete).
                continue
            all_files.append((rel_path, mtime))

    logger.info('list_files collected files: pre-sort=%d', len(all_files))

    # Sort once by mtime descending (globally newest first); every later
    # filter preserves this ordering.
    all_files.sort(key=lambda entry: entry[1], reverse=True)

    # Parse file names into structured entries.
    file_objs = [parse_filename(rel) for rel, _mtime in all_files]

    match_mode = 'all'
    # Strict filter: serial_no must match exactly.
    if serial_no:
        before_cnt = len(file_objs)
        file_objs = [obj for obj in file_objs if obj.get('serial_no') == serial_no]
        match_mode = 'serial_exact'
        logger.info('list_files exact filter by serial_no: before=%d after=%d', before_cnt, len(file_objs))

    # Filter by board side.
    if side_filter in ('A', 'B'):
        before_cnt = len(file_objs)
        file_objs = [obj for obj in file_objs if obj.get('side') == side_filter]
        match_mode += '+side'
        logger.info('list_files side filter %s: before=%d after=%d', side_filter, before_cnt, len(file_objs))

    # Keep only the newest entry when requested.  NOTE(review): the response's
    # 'total' below reflects the post-truncation count (1), matching the
    # original behavior; the pre-truncation count is only logged.
    total = len(file_objs)
    if only_latest and total > 0:
        file_objs = file_objs[:1]
        match_mode += '+latest'
        logger.info('list_files only_latest applied, return 1 of %d', total)

    # Pagination (page is 1-based).
    start = (page - 1) * page_size
    end = start + page_size
    paged_files = file_objs[start:end]

    logger.info('list_files page result: start=%d end=%d returned=%d', start, end, len(paged_files))

    return jsonify({
        'code': 200,
        'message': 'success',
        'data': {
            'total': len(file_objs),
            'page': page,
            'page_size': page_size,
            'files': paged_files,
            'filters': {
                'serial_no': serial_no or None,
                'side': side_filter or None,
                'only_latest': only_latest,
                'match_mode': match_mode
            }
        }
    })

@automation_eol_bp.route('/file_detail', methods=['GET', 'OPTIONS'])
@cross_origin(supports_credentials=True)
def file_detail():
    """Return one EOL CSV file parsed into a header row plus data rows.

    Query parameters:
        dir       -- root directory the relative path is resolved against
                     (default AUTOMATION_EOL_ROOT).
        file_path -- path relative to `dir`; traversal outside `dir` is rejected.

    Returns:
        JSON envelope {code, message, data}; data holds file_path, header
        (first CSV row) and rows (remaining rows). Errors: 400 for missing or
        illegal paths, 404 when the file does not exist, 500 on parse failure.
    """
    import csv
    import io

    if request.method == 'OPTIONS':
        # Answer the CORS preflight explicitly so every header is controlled here.
        origin = request.headers.get('Origin')
        req_headers = request.headers.get('Access-Control-Request-Headers', 'Content-Type, Authorization')
        logger.info('Preflight OPTIONS /file_detail from %s, req_headers=%s', origin, req_headers)
        resp = make_response('', 204)
        if origin:
            # Echo the origin (required when credentials are allowed).
            resp.headers['Access-Control-Allow-Origin'] = origin
            resp.headers['Vary'] = 'Origin'
        else:
            resp.headers['Access-Control-Allow-Origin'] = '*'
        resp.headers['Access-Control-Allow-Credentials'] = 'true'
        resp.headers['Access-Control-Allow-Methods'] = 'GET, OPTIONS'
        resp.headers['Access-Control-Allow-Headers'] = req_headers
        resp.headers['Access-Control-Max-Age'] = '86400'
        logger.info('Preflight /file_detail respond headers: %s', dict(resp.headers))
        return resp

    root_dir = request.args.get('dir', AUTOMATION_EOL_ROOT)
    file_path = request.args.get('file_path')
    logger.info('file_detail called: raw file_path=%s root_dir=%s', file_path, root_dir)

    if not file_path:
        logger.warning('file_detail missing file_path param')
        return jsonify({'code': 400, 'message': '缺少 file_path 参数', 'data': None}), 400

    # Force a relative path and normalize it before the traversal checks.
    file_path = file_path.lstrip('/').lstrip('\\')
    rel_norm = os.path.normpath(file_path)
    logger.info('file_detail normalized path: %s', rel_norm)

    # Fix: the previous `rel_norm.startswith('..')` check also rejected
    # legitimate names beginning with '..' (e.g. '..foo.csv'); inspect path
    # components instead so only real parent-directory traversal is refused.
    if os.path.isabs(rel_norm) or '..' in rel_norm.split(os.sep):
        logger.warning('file_detail illegal path: %s', rel_norm)
        return jsonify({'code': 400, 'message': '非法的 file_path 参数', 'data': None}), 400

    abs_root = os.path.abspath(root_dir)
    abs_path = os.path.abspath(os.path.join(abs_root, rel_norm))
    # Defense in depth: the fully resolved path must stay inside the root.
    if os.path.commonpath([abs_root, abs_path]) != abs_root:
        logger.warning('file_detail illegal path: %s', rel_norm)
        return jsonify({'code': 400, 'message': '非法的 file_path 参数', 'data': None}), 400
    logger.info('file_detail absolute path: %s', abs_path)

    if not os.path.isfile(abs_path):
        logger.error('file_detail not found: %s', abs_path)
        try:
            # Log a short listing of the parent directory to aid debugging.
            parent = os.path.dirname(abs_path)
            logger.info('file_detail parent listing (%s): %s', parent, os.listdir(parent)[:10])
        except Exception as e:
            logger.error('file_detail list parent failed: %s', e)
        return jsonify({'code': 404, 'message': f'文件不存在: {rel_norm}', 'data': None}), 404

    # Read and parse: detect BOM/UTF-16, strip NUL bytes, sniff the delimiter.
    try:
        with open(abs_path, 'rb') as fb:
            raw = fb.read()
        enc = None
        # BOM detection first — a UTF-16 file decoded as UTF-8 would be garbage.
        if raw.startswith(b'\xff\xfe'):
            enc = 'utf-16-le'
        elif raw.startswith(b'\xfe\xff'):
            enc = 'utf-16-be'
        elif raw.startswith(b'\xef\xbb\xbf'):
            enc = 'utf-8-sig'
        if enc:
            logger.info('file_detail BOM detected encoding: %s', enc)
            try:
                content = raw.decode(enc)
            except Exception as e:
                logger.warning('decode with BOM encoding %s failed: %s, fallback...', enc, e)
                content = None
        else:
            content = None
        if content is None:
            # Try common encodings in order of likelihood for this data.
            for try_enc in ['utf-8', 'gbk', 'gb2312', 'utf-16', 'utf-16-le', 'utf-16-be']:
                try:
                    content = raw.decode(try_enc)
                    enc = try_enc
                    logger.info('file_detail decoded with encoding: %s', try_enc)
                    break
                except Exception:
                    continue
        if content is None:
            # Last resort: latin1 with replacement never fails (may mojibake,
            # but avoids a hard error on undecodable bytes).
            enc = 'latin1'
            content = raw.decode(enc, errors='replace')
            logger.warning('file_detail fallback to latin1 with replace')
        # csv.reader raises on NUL characters; strip them defensively.
        if '\x00' in content:
            logger.info('file_detail content contains NULs, stripping...')
            content = content.replace('\x00', '')

        # Pick the delimiter from a sample; fall back to comma.
        sample = content[:8192]
        try:
            sniffed = csv.Sniffer().sniff(sample, delimiters=',;\t|')
            delimiter = sniffed.delimiter
            logger.info('file_detail sniffed delimiter: %r', delimiter)
            dialect = sniffed
        except Exception as e:
            delimiter = ','
            logger.warning('csv.Sniffer failed: %s, default delimiter=","', e)
            class SimpleDialect(csv.excel):
                delimiter = ','
            dialect = SimpleDialect()

        reader = csv.reader(io.StringIO(content), dialect=dialect)
        rows = list(reader)
    except Exception as e:
        logger.exception('file_detail parse failed: %s', e)
        return jsonify({'code': 500, 'message': f'文件解析失败: {e}', 'data': None}), 500

    # First row is treated as the header; everything else is data.
    header = rows[0] if rows else []
    data_rows = rows[1:] if len(rows) > 1 else []

    logger.info('file_detail parsed rows: header_len=%d rows_count=%d encoding=%s', len(header), len(data_rows), enc)

    return jsonify({
        'code': 200,
        'message': 'success',
        'data': {
            'file_path': rel_norm,
            'header': header,
            'rows': data_rows
        }
    })
