import 'dart:convert' as json;
import 'dart:io';
import 'dart:math' as math;
import 'dart:typed_data';

import 'package:ffmpeg_kit_flutter_new/ffmpeg_kit.dart';
import 'package:ffmpeg_kit_flutter_new/ffprobe_kit.dart';
import 'package:ffmpeg_kit_flutter_new/return_code.dart';
import 'package:ffmpeg_kit_flutter_new/session.dart';
import 'package:image_gallery_saver_plus/image_gallery_saver_plus.dart';
import 'package:opencv_core/opencv.dart' as cv;
// import 'package:dartcv4/dartcv.dart' as cv;
// import 'package:dartcv4/core.dart' as Core;
// import 'package:dartcv4/imgproc.dart' as Imgproc;
// import 'package:dartcv4/imgcodecs.dart' as Imgcodecs;
// import 'package:opencv_dart/opencv.dart' as cv;
import 'package:path_provider/path_provider.dart';

import 'configuration.dart';
import 'motility_analysis_result.dart';

/// Result of processing a single video frame: the centroid positions that
/// were collected for tracking plus the number of contours classified as
/// sperm.
class ProcessResult {
  /// Centroids (contour moments) collected from the frame.
  final List<cv.Point> positions;

  /// Count of contours that matched both the size and brightness criteria.
  final int spermCount;

  const ProcessResult({required this.positions, required this.spermCount});
}

class Motility {
  /// Analysis parameters. When null, [processFrame] throws [StateError] and
  /// [analyze] returns null.
  final Configuration? config;

  Motility({this.config});

  /// Processes a single frame: contrast adjustment, grayscale conversion,
  /// blur, inverted adaptive threshold and morphological opening, then
  /// detects and marks sperm on [frame].
  ///
  /// Throws [StateError] if [config] is null. The intermediate Mats are
  /// native OpenCV buffers not tracked by the Dart GC, so they are disposed
  /// in a `finally` block to avoid leaking memory on every frame.
  ProcessResult processFrame(cv.Mat frame) {
    final config = this.config;
    if (config == null) {
      throw StateError("Configuration is null");
    }

    // Structuring element for the morphological opening below.
    final kernel = cv.getStructuringElement(
      cv.MORPH_ELLIPSE,
      (config.grayKernalSize.toInt(), config.grayKernalSize.toInt()),
    );

    final gray = cv.Mat.empty();
    final blurred = cv.Mat.empty();
    final binary = cv.Mat.empty();
    final morph = cv.Mat.empty();

    try {
      // Preprocessing: brightness/contrast, grayscale (in place), blur.
      cv.convertScaleAbs(frame, dst: gray, alpha: config.grayAlpha, beta: config.grayBeta);
      cv.cvtColor(gray, cv.COLOR_BGR2GRAY, dst: gray);
      cv.gaussianBlur(
        gray,
        (config.grayBlurredSize.toInt(), config.grayBlurredSize.toInt()),
        0.0,
        dst: blurred,
      );

      // Inverted adaptive threshold: dark objects become foreground.
      cv.adaptiveThreshold(
        blurred,
        config.grayMaxValue.toDouble(),
        cv.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv.THRESH_BINARY_INV,
        config.grayBlockSize,
        config.grayCThreshold,
        dst: binary,
      );

      // Opening removes small noise specks before contour detection.
      cv.morphologyEx(binary, cv.MORPH_OPEN, kernel, dst: morph);

      return _detectAndMarkSpermInFrame(frame, morph);
    } finally {
      // Release native OpenCV memory; the originals were never freed.
      kernel.dispose();
      gray.dispose();
      blurred.dispose();
      binary.dispose();
      morph.dispose();
    }
  }

  /// Detects sperm candidates in [binaryImage], draws a colored overlay per
  /// contour on [frame], and returns centroid positions plus the sperm count.
  ///
  /// Classification per contour:
  /// - size AND brightness match -> green fill, counted as sperm;
  /// - size only                 -> yellow fill;
  /// - brightness only           -> magenta fill + area label;
  /// - neither                   -> red fill + area label.
  ///
  /// Throws [StateError] if [config] is null.
  ProcessResult _detectAndMarkSpermInFrame(cv.Mat frame, cv.Mat binaryImage) {
    final config = this.config;
    if (config == null) {
      throw StateError("Configuration is null");
    }

    // A single findContours call yields both contours and hierarchy; the
    // original ran it twice just to read each tuple member. The hierarchy is
    // unused with RETR_EXTERNAL, so it is discarded here.
    final (contours, _) = cv.findContours(
      binaryImage,
      cv.RETR_EXTERNAL,
      cv.CHAIN_APPROX_SIMPLE,
    );

    // Grayscale copy of the frame, used to measure mean brightness inside
    // each contour.
    final grayImage = cv.Mat.empty();
    cv.cvtColor(frame, cv.COLOR_BGR2GRAY, dst: grayImage);

    final positions = <cv.Point>[];
    int spermCount = 0;

    try {
      for (final contour in contours) {
        final area = cv.contourArea(contour);
        final boundingBox = cv.boundingRect(contour);

        final sizeMatch = area >= config.regMinSize && area <= config.regMaxSize;

        // Mean brightness inside the contour, measured through a filled
        // mask. The mask is a native buffer, so dispose it every iteration
        // (the original leaked one Mat per contour).
        final mask = cv.Mat.zeros(grayImage.rows, grayImage.cols, cv.MatType.CV_8UC1);
        cv.drawContours(mask, cv.Contours.fromVecPoint(contour), -1, cv.Scalar(255.0), thickness: cv.FILLED);
        final meanBrightness = cv.mean(grayImage, mask: mask).val[0];
        mask.dispose();
        final colorMatch = meanBrightness <= config.regGrayThreshold;

        if (sizeMatch && colorMatch) {
          cv.drawContours(frame, cv.Contours.fromVecPoint(contour), -1, cv.Scalar(0.0, 255.0, 0.0), thickness: cv.FILLED);
          spermCount++;
        } else if (sizeMatch && !colorMatch) {
          cv.drawContours(frame, cv.Contours.fromVecPoint(contour), -1, cv.Scalar(0.0, 255.0, 255.0), thickness: cv.FILLED);
        } else if (!sizeMatch && colorMatch) {
          cv.drawContours(frame, cv.Contours.fromVecPoint(contour), -1, cv.Scalar(255.0, 0.0, 255.0), thickness: cv.FILLED);
          // Label size-rejected contours with their area for threshold tuning.
          cv.putText(
            frame,
            area.toStringAsFixed(2),
            cv.Point((boundingBox.x - 5.0).toInt(), (boundingBox.y - 5.0).toInt()),
            cv.FONT_HERSHEY_SIMPLEX,
            0.3,
            cv.Scalar(255.0, 255.0, 0.0),
            thickness: 1,
          );
        } else {
          cv.drawContours(frame, cv.Contours.fromVecPoint(contour), -1, cv.Scalar(0.0, 0.0, 255.0), thickness: cv.FILLED);
          cv.putText(
            frame,
            area.toStringAsFixed(2),
            cv.Point((boundingBox.x - 5.0).toInt(), (boundingBox.y - 5.0).toInt()),
            cv.FONT_HERSHEY_SIMPLEX,
            0.3,
            cv.Scalar(255.0, 255.0, 0.0),
            thickness: 1,
          );
        }

        // Optional square marker around accepted detections.
        if (sizeMatch && colorMatch && config.showSpermBox) {
          final sideLength = math.max(boundingBox.width, boundingBox.height);
          final squareBox = cv.Rect(
            boundingBox.x - 5,
            boundingBox.y - 5,
            sideLength + 15,
            sideLength + 15,
          );
          cv.rectangle(frame, squareBox, cv.Scalar(0.0, 0.0, 255.0), thickness: 2);
        }

        // Record the centroid (image moments) for tracking. When
        // trackSpermOnly is set, only accepted detections are tracked.
        if (!config.trackSpermOnly || (sizeMatch && colorMatch)) {
          final moments = cv.moments(cv.Mat.fromVec(contour));
          if (moments.m00 != 0.0) {
            final cX = moments.m10 / moments.m00;
            final cY = moments.m01 / moments.m00;
            positions.add(cv.Point(cX.toInt(), cY.toInt()));
          }
        }
      }
    } finally {
      // Release the native grayscale buffer (previously leaked).
      grayImage.dispose();
    }

    return ProcessResult(positions: positions, spermCount: spermCount);
  }

  /// Returns whether FFmpeg reports a video encoder named [encoderName].
  ///
  /// Runs `ffmpeg -encoders` and scans lines beginning with "V" (video
  /// encoders). A null output is treated as "not supported" instead of
  /// crashing.
  Future<bool> _isEncoderSupported(String encoderName) async {
    print("Motility: Checking for encoder: $encoderName");
    final session = await FFmpegKit.execute("-encoders");
    if (ReturnCode.isSuccess(await session.getReturnCode())) {
      final output = await session.getOutput();
      // `output` may be null; default to false rather than throwing (the
      // original `isSupported!` crashed when FFmpeg produced no output).
      final isSupported = output
              ?.split('\n')
              .any((line) => line.trim().startsWith("V") && line.contains(encoderName)) ??
          false;
      if (isSupported) {
        print("Motility: Encoder '$encoderName' is supported.");
      } else {
        print("Motility: Encoder '$encoderName' is NOT supported.");
      }
      return isSupported;
    } else {
      print("Motility: Failed to get list of encoders from FFmpeg.");
      return false;
    }
  }

  /// Extracts up to [segmentCount] clips of at most [segmentDuration] seconds
  /// from the video at [videoPath], spread evenly across its duration.
  ///
  /// Returns the paths of segments that were written successfully (possibly
  /// empty). Re-encodes with libx264 when the encoder is available; otherwise
  /// falls back to `-c copy`, which cuts on keyframes and may be inaccurate.
  Future<List<String>> _extractVideoSegments(
    String videoPath,
    int segmentCount,
    double segmentDuration,
  ) async {
    final outputPaths = <String>[];

    // Guard: `totalSeconds / segmentCount` below would otherwise divide by
    // zero (Infinity.toInt() throws on the Dart VM).
    if (segmentCount <= 0) {
      print("Motility: segmentCount must be positive, got $segmentCount");
      return outputPaths;
    }

    try {
      final useLibx264 = await _isEncoderSupported("libx264");
      if (!useLibx264) {
        print("Motility: libx264 not supported. Falling back to '-c copy'. This may be inaccurate.");
      }

      print("Motility: Extracting video segments from: $videoPath");

      // Probe the file: `ffmpeg -i` fails (no output file) but still prints
      // the stream banner containing the Duration line parsed below.
      final session = await FFmpegKit.execute("-i \"$videoPath\"");
      final output = await session.getOutput();

      // Parse "Duration: HH:MM:SS.cc" into seconds.
      double totalSeconds = 0.0;
      final durationMatch = RegExp(r'Duration: (\d{2}):(\d{2}):(\d{2})\.(\d{2})').firstMatch(output!);
      if (durationMatch != null) {
        final hours = int.parse(durationMatch.group(1)!);
        final minutes = int.parse(durationMatch.group(2)!);
        final seconds = int.parse(durationMatch.group(3)!);
        final centiseconds = int.parse(durationMatch.group(4)!);
        totalSeconds = hours * 3600 + minutes * 60 + seconds + centiseconds / 100.0;
      }

      // If the duration could not be determined, every start time would be
      // skipped anyway; bail out explicitly with a clear message.
      if (totalSeconds <= 0) {
        print("Motility: Could not determine video duration; no segments extracted.");
        return outputPaths;
      }

      // Keep the interval at least 1s: a zero interval (more segments than
      // seconds of video) would start every segment at t=0 and extract
      // duplicates.
      final interval = math.max(1, (totalSeconds / segmentCount).toInt());
      final tempDir = await getTemporaryDirectory();
      final outputDir = Directory('${tempDir.path}/segments');
      if (!await outputDir.exists()) {
        await outputDir.create(recursive: true);
      }

      // Remove segments left over from a previous run.
      final files = outputDir.listSync();
      for (final file in files) {
        if (file is File) {
          await file.delete();
        }
      }

      // Skip the first few seconds of the recording for the first segment.
      const offset = 3;
      for (int i = 0; i < segmentCount; i++) {
        final startTime = i == 0 ? offset : i * interval;
        if (startTime >= totalSeconds) continue;
        final currentSegmentDuration = math.min(segmentDuration, totalSeconds - startTime);

        final outputPath = '${outputDir.path}/segment_${i + 1}.mp4';

        // Re-encode with libx264 when available, else copy streams directly.
        final ffmpegCommand = useLibx264
            ? "-y -i \"$videoPath\" -ss $startTime -t $currentSegmentDuration -c:v libx264 -preset ultrafast -c:a aac \"$outputPath\""
            : "-y -i \"$videoPath\" -ss $startTime -t $currentSegmentDuration -c copy \"$outputPath\"";

        print("Motility: FFmpeg command: $ffmpegCommand");

        final segmentSession = await FFmpegKit.execute(ffmpegCommand);
        final returnCode = await segmentSession.getReturnCode();
        final outputFile = File(outputPath);
        final fileSizeInBytes = await outputFile.exists() ? await outputFile.length() : 0;

        print("Motility: Segment $i: startTime=$startTime, duration=$currentSegmentDuration, size=${fileSizeInBytes} bytes");

        // Accept only non-empty files written by a successful FFmpeg run
        // (fileSizeInBytes already encodes "exists and has content").
        if (ReturnCode.isSuccess(returnCode) && fileSizeInBytes > 0) {
          print("Motility: Successfully created segment: $outputPath ($fileSizeInBytes bytes)");
          outputPaths.add(outputPath);
        } else {
          print("Motility: FFmpeg failed for segment $i. Return code: $returnCode");
          print("Motility: FFmpeg output: ${await segmentSession.getOutput()}");
          if (await outputFile.exists()) {
            await outputFile.delete();
          }
        }
      }
    } catch (e) {
      print("Motility: Error extracting segments: $e");
    }

    return outputPaths;
  }

  /// Analyzes the video at [videoPath] by extracting segments and averaging
  /// the per-segment analysis results.
  ///
  /// Returns null when [config] is null, the file is missing, or no segment
  /// produced a result.
  Future<MotilityAnalysisResult?> analyze(String videoPath) async {
    final config = this.config;
    if (config == null) return null;

    print("Motility: Extracting video: $videoPath");

    // Verify the video file exists before doing any work.
    final videoFile = File(videoPath);
    if (!await videoFile.exists()) {
      print("Motility: Video file does not exist: $videoPath");
      return null;
    }

    final segments = await _extractVideoSegments(
      videoPath,
      config.segmentCount,
      config.segmentDuration,
    );

    final results = <MotilityAnalysisResult>[];
    for (final segment in segments) {
      final result = await _analyzeSegment(segment);
      if (result != null) {
        results.add(result);
      }
    }

    if (results.isEmpty) {
      print("Motility: results 为空");
      return null;
    }

    // Average one integer counter over all segment results. `results` is
    // guaranteed non-empty here, so the original per-field `isNotEmpty ?`
    // guards were dead code. Integer division (~/) matches the original.
    int avgOf(int Function(MotilityAnalysisResult) pick) =>
        results.fold<int>(0, (sum, r) => sum + pick(r)) ~/ results.length;

    return MotilityAnalysisResult(
      avgTotalDisplacement: 0.0,
      avgVsl: 0.0,
      avgVcl: 0.0,
      avgVap: 0.0,
      avgAlh: 0.0,
      numOfMotileTrajectories: avgOf((r) => r.numOfMotileTrajectories),
      numOfDetectedSperms: avgOf((r) => r.numOfDetectedSperms),
      numOfALevelSperms: avgOf((r) => r.numOfALevelSperms),
      numOfBLevelSperms: avgOf((r) => r.numOfBLevelSperms),
      numOfCLevelSperms: avgOf((r) => r.numOfCLevelSperms),
      imgData: null,
      imgDataOfSpermDetection: null,
      createdTime: DateTime.now().millisecondsSinceEpoch ~/ 1000,
    );
  }

  /// Returns the frame count of the video at [videoPath], or 0 on failure.
  ///
  /// Strategies, in order:
  /// 1. ffprobe with -count_frames (exact, decodes the stream);
  /// 2. ffprobe duration x frame rate;
  /// 3. parsing fps and Duration from the plain `ffmpeg -i` banner.
  Future<int> _countFramesByDecoding(String videoPath) async {
    try {
      print("Motility: 正在获取视频帧数: $videoPath");

      // Method 1: exact count via ffprobe. The original code passed these
      // probe-only options to FFmpegKit (ffmpeg), which rejects
      // -show_entries, so this step always failed; FFprobeKit runs ffprobe.
      // -count_frames fills nb_read_frames (nb_frames is container metadata
      // and is often absent).
      final session = await FFprobeKit.execute(
        '-v error -select_streams v:0 -count_frames -show_entries stream=nb_read_frames -of csv=p=0 "$videoPath"',
      );

      final returnCode = await session.getReturnCode();
      final output = await session.getOutput();

      if (ReturnCode.isSuccess(returnCode) && output != null && output.trim().isNotEmpty) {
        final frameCount = int.tryParse(output.trim());
        if (frameCount != null && frameCount > 0) {
          print("Motility: 成功获取帧数: $frameCount");
          return frameCount;
        }
      }

      // Method 2: duration x frame rate via ffprobe, one value per line.
      print("Motility: 直接获取失败，尝试通过时长和帧率计算");
      final session2 = await FFprobeKit.execute(
        '-v error -select_streams v:0 -show_entries stream=duration,r_frame_rate -of default=noprint_wrappers=1:nokey=1 "$videoPath"',
      );

      final returnCode2 = await session2.getReturnCode();
      final output2 = await session2.getOutput();

      if (ReturnCode.isSuccess(returnCode2) && output2 != null && output2.trim().isNotEmpty) {
        final lines = output2.trim().split('\n').map((line) => line.trim()).toList();
        if (lines.length >= 2) {
          // ffprobe prints r_frame_rate before duration (stream-section
          // order), so identify the rational "num/den" line instead of
          // relying on position.
          final frameRateStr = lines.firstWhere((l) => l.contains('/'), orElse: () => lines[0]);
          final durationStr = lines.firstWhere((l) => !l.contains('/'), orElse: () => lines[1]);
          final duration = double.tryParse(durationStr);

          if (duration != null && frameRateStr.isNotEmpty) {
            // Parse the frame rate (e.g. "30/1" or "29.97").
            double frameRate = 30.0; // fallback frame rate
            if (frameRateStr.contains('/')) {
              final parts = frameRateStr.split('/');
              if (parts.length == 2) {
                final numerator = double.tryParse(parts[0]);
                final denominator = double.tryParse(parts[1]);
                if (numerator != null && denominator != null && denominator != 0) {
                  frameRate = numerator / denominator;
                }
              }
            } else {
              frameRate = double.tryParse(frameRateStr) ?? 30.0;
            }

            final calculatedFrames = (duration * frameRate).round();
            print("Motility: 通过时长(${duration}s)和帧率(${frameRate}fps)计算帧数: $calculatedFrames");
            return calculatedFrames;
          }
        }
      }

      // Method 3: parse fps and Duration from the ffmpeg banner output.
      print("Motility: 尝试从视频信息中解析帧数");
      final session3 = await FFmpegKit.execute('-i "$videoPath"');
      final output3 = await session3.getOutput();

      if (output3 != null) {
        final frameRateMatch = RegExp(r'(\d+(?:\.\d+)?)\s*fps').firstMatch(output3);
        final durationMatch = RegExp(r'Duration:\s*(\d{2}):(\d{2}):(\d{2})\.(\d{2})').firstMatch(output3);

        if (frameRateMatch != null && durationMatch != null) {
          final fps = double.parse(frameRateMatch.group(1)!);
          final hours = int.parse(durationMatch.group(1)!);
          final minutes = int.parse(durationMatch.group(2)!);
          final seconds = int.parse(durationMatch.group(3)!);
          final centiseconds = int.parse(durationMatch.group(4)!);
          final totalSeconds = hours * 3600 + minutes * 60 + seconds + centiseconds / 100.0;

          final calculatedFrames = (totalSeconds * fps).round();
          print("Motility: 通过基本信息计算帧数: $calculatedFrames (${totalSeconds}s * ${fps}fps)");
          return calculatedFrames;
        }
      }

      print("Motility: 所有方法都失败，返回默认值");
      return 0;
    } catch (e) {
      print("Motility: 获取帧数出错: $e");
      return 0;
    }
  }

  /// Returns the video's frame size as `{'width': w, 'height': h}`, or null
  /// when it cannot be determined.
  Future<Map<String, int>?> getVideoDimensions(String videoPath) async {
    try {
      print("Motility: 正在获取视频尺寸: $videoPath");

      // Verify the video file exists before probing.
      final videoFile = File(videoPath);
      if (!await videoFile.exists()) {
        print("Motility: 视频文件不存在: $videoPath");
        return null;
      }

      // Query the stream size with ffprobe. The original code sent these
      // probe-only options to FFmpegKit (ffmpeg), which rejects
      // -show_entries; FFprobeKit runs ffprobe. csv output is
      // "width,height" on one line.
      final session = await FFprobeKit.execute(
        '-v error -select_streams v:0 -show_entries stream=width,height -of csv=p=0 "$videoPath"',
      );

      final returnCode = await session.getReturnCode();
      final output = await session.getOutput();

      if (ReturnCode.isSuccess(returnCode) && output != null && output.trim().isNotEmpty) {
        final dimensions = output.trim().split(',');
        if (dimensions.length == 2) {
          final width = int.tryParse(dimensions[0]);
          final height = int.tryParse(dimensions[1]);

          if (width != null && height != null && width > 0 && height > 0) {
            print("Motility: 成功获取视频尺寸: ${width}x${height}");
            return {
              'width': width,
              'height': height,
            };
          }
        }
      }

      // Fallback: find a "WxH" token in the ffmpeg banner output.
      print("Motility: 直接获取失败，尝试从完整视频信息中解析");
      final session2 = await FFmpegKit.execute('-i "$videoPath"');
      final output2 = await session2.getOutput();

      if (output2 != null) {
        final sizeMatch = RegExp(r'(\d{3,4})x(\d{3,4})').firstMatch(output2);
        if (sizeMatch != null) {
          final width = int.parse(sizeMatch.group(1)!);
          final height = int.parse(sizeMatch.group(2)!);

          if (width > 0 && height > 0) {
            print("Motility: 从视频信息中解析尺寸: ${width}x${height}");
            return {
              'width': width,
              'height': height,
            };
          }
        }
      }

      print("Motility: 无法获取视频尺寸");
      return null;
    } catch (e) {
      print("Motility: 获取视频尺寸出错: $e");
      return null;
    }
  }

  /// Returns detailed video info (width, height, frameRate, duration,
  /// frameCount) for [videoPath], or null on failure.
  ///
  /// Individual values may be null when the container does not expose them.
  Future<Map<String, dynamic>?> getVideoInfo(String videoPath) async {
    try {
      print("Motility: 正在获取视频详细信息: $videoPath");

      // Verify the video file exists before probing.
      final videoFile = File(videoPath);
      if (!await videoFile.exists()) {
        print("Motility: 视频文件不存在: $videoPath");
        return null;
      }

      // Query stream metadata as JSON with ffprobe. The original code sent
      // these probe-only options to FFmpegKit (ffmpeg), which rejects
      // -show_entries; FFprobeKit runs ffprobe and emits
      // {"streams":[{...}]} as expected by the parser below.
      final session = await FFprobeKit.execute(
        '-v error -select_streams v:0 -show_entries stream=width,height,r_frame_rate,duration,nb_frames -of json "$videoPath"',
      );

      final returnCode = await session.getReturnCode();
      final output = await session.getOutput();

      if (ReturnCode.isSuccess(returnCode) && output != null && output.trim().isNotEmpty) {
        try {
          final Map<String, dynamic> jsonData = json.jsonDecode(output);
          final streams = jsonData['streams'] as List<dynamic>?;

          if (streams != null && streams.isNotEmpty) {
            final stream = streams.first as Map<String, dynamic>;
            final width = stream['width'] as int?;
            final height = stream['height'] as int?;
            final frameRate = stream['r_frame_rate'] as String?;
            final duration = stream['duration'] as String?;
            final nbFrames = stream['nb_frames'] as String?;

            // r_frame_rate is a rational like "30/1" or a plain number.
            double? fps;
            if (frameRate != null) {
              if (frameRate.contains('/')) {
                final parts = frameRate.split('/');
                if (parts.length == 2) {
                  final numerator = double.tryParse(parts[0]);
                  final denominator = double.tryParse(parts[1]);
                  if (numerator != null && denominator != null && denominator != 0) {
                    fps = numerator / denominator;
                  }
                }
              } else {
                fps = double.tryParse(frameRate);
              }
            }

            final result = <String, dynamic>{
              'width': width,
              'height': height,
              'frameRate': fps,
              'duration': duration != null ? double.tryParse(duration) : null,
              'frameCount': nbFrames != null ? int.tryParse(nbFrames) : null,
            };

            print("Motility: 成功获取视频信息: $result");
            return result;
          }
        } catch (e) {
          print("Motility: 解析JSON失败: $e");
        }
      }

      // Fallback: parse size, fps and Duration from the ffmpeg banner.
      print("Motility: JSON解析失败，尝试从文本输出中解析");
      final session2 = await FFmpegKit.execute('-i "$videoPath"');
      final output2 = await session2.getOutput();

      if (output2 != null) {
        final sizeMatch = RegExp(r'(\d{3,4})x(\d{3,4})').firstMatch(output2);
        final fpsMatch = RegExp(r'(\d+(?:\.\d+)?)\s*fps').firstMatch(output2);
        final durationMatch = RegExp(r'Duration:\s*(\d{2}):(\d{2}):(\d{2})\.(\d{2})').firstMatch(output2);

        if (sizeMatch != null) {
          final width = int.parse(sizeMatch.group(1)!);
          final height = int.parse(sizeMatch.group(2)!);

          double? fps;
          if (fpsMatch != null) {
            fps = double.parse(fpsMatch.group(1)!);
          }

          double? duration;
          if (durationMatch != null) {
            final hours = int.parse(durationMatch.group(1)!);
            final minutes = int.parse(durationMatch.group(2)!);
            final seconds = int.parse(durationMatch.group(3)!);
            final centiseconds = int.parse(durationMatch.group(4)!);
            duration = hours * 3600 + minutes * 60 + seconds + centiseconds / 100.0;
          }

          final result = <String, dynamic>{
            'width': width,
            'height': height,
            'frameRate': fps,
            'duration': duration,
            'frameCount': fps != null && duration != null ? (duration * fps).round() : null,
          };

          print("Motility: 从文本输出解析视频信息: $result");
          return result;
        }
      }

      print("Motility: 无法获取视频详细信息");
      return null;
    } catch (e) {
      print("Motility: 获取视频详细信息出错: $e");
      return null;
    }
  }

  /// Extracts frame [frameNumber] from the video at [videoPath] and returns
  /// it as an OpenCV [cv.Mat], or null on any failure.
  ///
  /// The frame is seeked by time (frameNumber / [fps]), written to a
  /// temporary JPEG, read back with OpenCV, and the temp file is deleted.
  /// [totalFrames], when non-null, is only used to range-check [frameNumber].
  Future<cv.Mat?> getVideoFrameAt(String videoPath, int frameNumber,int? totalFrames,double fps) async {
    try {
      print("Motility: 正在获取视频第${frameNumber}帧: $videoPath");
      
      // // Check whether the video file exists
      // final videoFile = File(videoPath);
      // if (!await videoFile.exists()) {
      //   print("Motility: 视频文件不存在: $videoPath");
      //   return null;
      // }
      //
      // // Fetch video info to validate the frame range
      // final videoInfo = await getVideoInfo(videoPath);
      // if (videoInfo == null) {
      //   print("Motility: 无法获取视频信息");
      //   return null;
      // }
      //
      // final totalFrames = videoInfo['frameCount'] as int?;

      // Range check only when the caller supplied a total frame count.
      if (totalFrames != null && frameNumber >= totalFrames) {
        print("Motility: 帧数超出范围，总帧数: $totalFrames，请求帧数: $frameNumber");
        return null;
      }
      
      // Temporary directory used to store extracted frames.
      final tempDir = await getTemporaryDirectory();
      final frameDir = Directory('${tempDir.path}/frames');
      if (!await frameDir.exists()) {
        await frameDir.create(recursive: true);
      }
      
      // Path of the temporary single-frame JPEG.
      final framePath = '${frameDir.path}/frame_${frameNumber}.jpg';
      
      // Extract exactly one frame with ffmpeg:
      // -ss sets the start time, -vframes 1 grabs a single frame.
      // final fps = videoInfo['frameRate'] as double? ?? 30.0;
      final startTime = frameNumber / fps;
      
      final session = await FFmpegKit.execute(
        '-y -i "$videoPath" -ss $startTime -vframes 1 -q:v 2 "$framePath"',
      );
      
      final returnCode = await session.getReturnCode();
      final output = await session.getOutput();
      
      if (ReturnCode.isSuccess(returnCode)) {
        // Verify the extracted frame file exists before decoding it.
        final frameFile = File(framePath);
        if (await frameFile.exists()) {
          print("Motility: 成功提取第${frameNumber}帧到: $framePath");
          print("Motility: ============ 5555");

          // Read the image file with OpenCV.
          // NOTE(review): cv.imread appears to return a Mat synchronously in
          // opencv_core; `await` on a non-Future is a no-op and the
          // `mat != null` check below may be vacuous — confirm whether
          // imreadAsync was intended.

          final mat = await cv.imread(framePath);

          print("Motility: ============ 6666");

          if (mat != null && !mat.isEmpty) {
            print("Motility: 成功将第${frameNumber}帧转换为Mat对象，尺寸: ${mat.cols}x${mat.rows}");
            
            // Remove the temporary frame file on success.
            await frameFile.delete();
            
            return mat;
          } else {
            print("Motility: 无法读取提取的帧文件");
            if (await frameFile.exists()) {
              await frameFile.delete();
            }
            return null;
          }
        } else {
          print("Motility: 提取的帧文件不存在: $framePath");
          return null;
        }
      } else {
        print("Motility: FFmpeg提取帧失败，返回码: $returnCode");
        print("Motility: FFmpeg输出: $output");
        
        // Clean up any partially written temporary file.
        final frameFile = File(framePath);
        if (await frameFile.exists()) {
          await frameFile.delete();
        }
        
        return null;
      }
      
    } catch (e) {
      print("Motility: 获取视频第${frameNumber}帧出错: $e");
      return null;
    }
  }

  /// 获取视频的多个帧
  // Future<List<Mat>> getVideoFrames(String videoPath, List<int> frameNumbers) async {
  //   final frames = <Mat>[];
    
  //   for (final frameNumber in frameNumbers) {
  //     final frame = await getVideoFrameAt(videoPath, frameNumber);
  //     if (frame != null) {
  //       frames.add(frame);
  //     }
  //   }
    
  //   print("Motility: 成功获取 ${frames.length}/${frameNumbers.length} 帧");
  //   return frames;
  // }

  /// 获取视频的连续帧范围
  // Future<List<Mat>> getVideoFrameRange(String videoPath, int startFrame, int endFrame, {int step = 1}) async {
  //   final frames = <Mat>[];
    
  //   for (int frameNumber = startFrame; frameNumber <= endFrame; frameNumber += step) {
  //     final frame = await getVideoFrameAt(videoPath, frameNumber);
  //     if (frame != null) {
  //       frames.add(frame);
  //     }
  //   }
    
  //   print("Motility: 成功获取帧范围 ${startFrame}-${endFrame} (步长${step})，共 ${frames.length} 帧");
  //   return frames;
  // }

  /// 获取视频的随机采样帧
  // Future<List<Mat>> getVideoRandomFrames(String videoPath, int count) async {
  //   try {
  //     // 获取视频信息
  //     final videoInfo = await getVideoInfo(videoPath);
  //     if (videoInfo == null) {
  //       print("Motility: 无法获取视频信息");
  //       return [];
  //     }
      
  //     final totalFrames = videoInfo['frameCount'] as int? ?? 0;
  //     if (totalFrames <= 0) {
  //       print("Motility: 视频总帧数为0");
  //       return [];
  //     }
      
  //     // 生成随机帧号
  //     final random = math.Random();
  //     final frameNumbers = <int>{};
      
  //     while (frameNumbers.length < count && frameNumbers.length < totalFrames) {
  //       frameNumbers.add(random.nextInt(totalFrames));
  //     }
      
  //     // 获取随机帧
  //     return await getVideoFrames(videoPath, frameNumbers.toList());
      
  //   } catch (e) {
  //     print("Motility: 获取随机帧出错: $e");
  //     return [];
  //   }
  // }

  /// 分析视频片段
  Future<MotilityAnalysisResult?> _analyzeSegment(String videoPath) async {
    final config = this.config;
    if (config == null) return null;
    
    print("Motility: Analyzing video: $videoPath");

    try {
      await Future.delayed(Duration(milliseconds: 2000)); // 模拟延迟
    } catch (e) {
      print("Motility: Sleep interrupted: $e");
    }

    // 注意：opencv_dart的VideoCapture API可能与原生OpenCV不同
    // 这里需要根据实际的opencv_dart API来调整
    // 以下是概念性的实现


    // final videoCapture = VideoCapture.create(videoPath);
    var videoInfo = await getVideoInfo(videoPath);
    if(videoInfo == null){
      print("Motility:=== videoInfo: $videoInfo");
      videoInfo = <String, dynamic>{
        'width': 640,
        'height': 480,
        'frameRate': 30,
        'frameCount': 0,
      };
    }


    final totalFrames = videoInfo['frameCount'];
    if (totalFrames <= 0) {
      print("Motility: Couldn't read video stream from file: $videoPath");
      return null;
    }

    // 假设的帧率和尺寸
    final fps = videoInfo['frameRate'];
    // final fps = videoCapture.get(CAP_PROP_FPS);
    final frameWidth = videoInfo['width'];
    final frameHeight = videoInfo['height'];

    final cropWidth = (frameWidth * 0.5).toInt();
    final cropHeight = (frameHeight * 0.9).toInt();

    final xStart = ((frameWidth - cropWidth) ~/ 2);
    final yStart = ((frameHeight - cropHeight) ~/ 2);

    final spermTracks = <int, List<cv.Point>>{};
    int spermID = 0;

    final tempDir = await getTemporaryDirectory();
    final outputPath = '${tempDir.path}/asperm.jpg';
    final outputPath2 = '${tempDir.path}/asperm_detection.jpg';
    cv.Mat? outputFrame;

    int sumOfSperms = 0;
    int sumOfSpermDetections = 0;

    // 这里需要根据opencv_dart的实际API来实现视频帧处理
    // 由于opencv_dart的限制，这里提供概念性的实现
    // 遍历所有帧（对应Kotlin的 for (frameNum in 0 until totalFrames)）
    for (var frameNum = 0; frameNum < totalFrames; frameNum++) {
      // 读取当前帧（对应Kotlin的 videoCapture.read(frame)）
      final frame = await getVideoFrameAt(videoPath, frameNum, totalFrames, fps);

      // final success = VideoCapture.read(frame);
      // if (!success) {
      //   frame.release(); // 释放资源
      //   break;
      // }

      // 根据配置裁剪帧（对应Kotlin的 Mat(frame, Rect(...))）
      late cv.Mat processedFrame;
      if (config.cropFrame) {
        // 裁剪区域：Rect(x, y, width, height)
        final cropRect = cv.Rect(
          xStart,
          yStart,
          cropWidth,
          cropHeight,
        );

        processedFrame = cv.Mat.fromMat(frame!,roi:cropRect); // 裁剪子矩阵
      } else {
        processedFrame = frame!; // 不裁剪，直接使用原帧
      }

      // 处理帧（假设processFrame已实现，返回精子位置和数量）
      final ProcessResult result = processFrame(processedFrame);
      final List<cv.Point> spermPositionsInFrame = result.positions;
      final int spermCount = result.spermCount;

      if (spermPositionsInFrame.isNotEmpty) {
        // 累加统计数据
        sumOfSperms += spermPositionsInFrame.length;
        sumOfSpermDetections++;

        // 匹配并更新精子轨迹
        for (final pos in spermPositionsInFrame) {
          bool matched = false;

          // 遍历已有轨迹，寻找匹配的上一位置
          for (final entry in spermTracks.entries) {
            final track = entry.value;
            if (track.isNotEmpty) {
              // 计算当前位置与轨迹最后一个位置的距离（对应Kotlin的Core.norm）
              final lastPos = track.last;

              // val distance = Core.norm(MatOfPoint2f(track.last(), pos))
              // Plain Euclidean distance between the track's last point and
              // the candidate position (avoids building a MatOfPoint2f).
              final distance = math.sqrt(
                math.pow(pos.x - lastPos.x, 2) + math.pow(pos.y - lastPos.y, 2)
              );

              // Within the match radius: append the detection to this track
              // and stop scanning further tracks for it.
              if (distance < config.matchDistance) {
                track.add(pos);
                matched = true;
                break; // leave the current track loop
              }
            }
          }

          // No existing track matched — start a new track for this detection.
          if (!matched) {
            spermTracks[spermID++] = [pos];
          }
        }

        // Draw trajectory polylines and the current head position
        // (mirrors the Kotlin Imgproc.line / Imgproc.circle code).
        for (final track in spermTracks.values) {
          if (track.isNotEmpty) {
            // Trajectory: a segment between each consecutive pair of points.
            for (var i = 1; i < track.length; i++) {
              final prev = track[i - 1];
              final curr = track[i];
              cv.line(
                processedFrame,
                prev,
                curr,
                cv.Scalar(0.0, 255.0, 0.0), // green line (BGR order)
                thickness: 2, // line width
              );
            }

            // Current position: filled circle at the latest track point.
            final currPos = track.last;
            cv.circle(
              processedFrame,
              currPos,
              3, // radius
              cv.Scalar(0.0, 0.0, 255.0), // red circle (BGR order)
              thickness: -1, // -1 = filled
            );
          }
        }

        outputFrame = processedFrame; // keep the latest annotated frame
      }
    }

    // Per-trajectory metrics: [totalDisplacement, vsl, vcl, vap, alh].
    final motileTrajectories = <List<double>>[];

    // Walk every accumulated track.
    spermTracks.forEach((_, points) {
      // Skip tracks with fewer than 2 points (no motion to measure).
      if (points.length > 1) {
        // 1. Total path length actually travelled (sum of segment lengths).
        double totalDisplacement = 0.0;
        // Sum the distance over each adjacent pair of points.
        for (int i = 0; i < points.length - 1; i++) {
          final prev = points[i];
          final curr = points[i + 1];
          // Euclidean distance between the two points.
          // final distance = cv.Core.norm(
          //   cv.MatOfPoint2f.fromPoints([prev, curr]),
          // );

          final distance = math.sqrt(
              math.pow(curr.x - prev.x, 2) + math.pow(curr.y - prev.y, 2)
          );
          totalDisplacement += distance;
        }

        // 2. Straight-line distance from first to last point, and total time.
        final startPos = points.first;
        final endPos = points.last;
        // final straightLineDistance = cv.Core.norm(
        //   cv.MatOfPoint2f.fromPoints([startPos, endPos]),
        // );
        final straightLineDistance = math.sqrt(
            math.pow(endPos.x - startPos.x, 2) + math.pow(endPos.y - startPos.y, 2)
        );
        final totalTime = points.length / fps; // duration in seconds

        // 3. Velocity parameters (pixel units at this stage).
        final vsl = straightLineDistance / totalTime; // straight-line velocity
        final vcl = totalDisplacement / totalTime;     // curvilinear velocity

        // 4. Average path point: the centroid of all track points.
        double avgX = 0.0;
        double avgY = 0.0;
        for (final pos in points) {
          avgX += pos.x;
          avgY += pos.y;
        }
        avgX /= points.length;
        avgY /= points.length;
        final averagePath = cv.Point(avgX.toInt(), avgY.toInt());

        // 5. Average-path-derived parameters.
        // NOTE(review): VAP here is the summed point-to-centroid distance over
        // time rather than the length of a smoothed average path, and ALH is
        // that sum divided by the point count — confirm these match the
        // intended CASA definitions.
        double totalAveragePathDistance = 0.0;
        for (final pos in points) {
          // totalAveragePathDistance += cv.norm(
          //   cv.MatOfPoint2f.fromPoints([pos, averagePath]),
          // );
          totalAveragePathDistance += math.sqrt(
              math.pow(averagePath.x - pos.x, 2) + math.pow(averagePath.y - pos.y, 2)
          );
        }
        final vap = totalAveragePathDistance / totalTime; // average-path velocity
        final alh = totalAveragePathDistance / points.length; // amplitude of lateral head displacement

        // Record this trajectory's metrics.
        motileTrajectories.add([
          totalDisplacement,
          vsl,
          vcl,
          vap,
          alh,
        ]);
      }
    });
    
    // Aggregate means over all motile trajectories.
    double avgTotalDisplacement = 0.0;
    double avgVsl = 0.0;
    double avgVcl = 0.0;
    double avgVap = 0.0;
    double avgAlh = 0.0;
    // Pixel-to-physical-unit conversion factor; dividing by 0.5 doubles the
    // pixel values. NOTE(review): presumably 0.5 px per micrometre — confirm
    // against the optics calibration.
    const pixelRatio = 0.5;

    int numOfALevelSperms = 0;
    int numOfBLevelSperms = 0;
    int numOfCLevelSperms = 0;

    for (final trajectory in motileTrajectories) {
      avgTotalDisplacement += trajectory[0];
      avgVsl += trajectory[1];
      avgVcl += trajectory[2];
      avgVap += trajectory[3];
      avgAlh += trajectory[4];

      // Convert to physical units before grading.
      final vsl = trajectory[1] / pixelRatio;
      final vcl = trajectory[2] / pixelRatio;
      final lin = vcl > 0 ? vsl / vcl : 0.0; // linearity (VSL / VCL)

      // NOTE(review): thresholds look like WHO-style A/B/C grading. As
      // written, vcl exactly 40 with lin < 0.6 lands in the B bucket and
      // vcl < 20 is never counted — confirm the intended boundary handling.
      if (lin >= 0.6 && vcl > 40) {
        numOfALevelSperms++;
      } else if (vcl >= 20.0 && vcl <= 40.0) {
        numOfBLevelSperms++;
      } else if (vcl > 40 && lin < 0.6) {
        numOfCLevelSperms++;
      }
    }

    if (motileTrajectories.isNotEmpty) {
      // Means in pixel units first...
      avgTotalDisplacement /= motileTrajectories.length;
      avgVsl /= motileTrajectories.length;
      avgVcl /= motileTrajectories.length;
      avgVap /= motileTrajectories.length;
      avgAlh /= motileTrajectories.length;

      // ...then converted with the pixel ratio.
      avgTotalDisplacement /= pixelRatio;
      avgVsl /= pixelRatio;
      avgVcl /= pixelRatio;
      avgVap /= pixelRatio;
      avgAlh /= pixelRatio;
    }

    // Mean detections per processed frame (truncated); 0 when nothing ran.
    final numOfDetectedSperms = sumOfSpermDetections > 0 ? (sumOfSperms / sumOfSpermDetections).toInt() : 0;
    
    // Annotated-frame image data for the result.
    Uint8List? imgDataOfSpermDetection;
    if (outputFrame != null) {
      // Mat -> Uint8List conversion is not implemented yet; it depends on the
      // opencv_dart API. TODO: encode outputFrame (e.g. via cv.imencode).
      imgDataOfSpermDetection = null; // placeholder
    }

    final result = MotilityAnalysisResult(
      avgTotalDisplacement: avgTotalDisplacement,
      avgVsl: avgVsl,
      avgVcl: avgVcl,
      avgVap: avgVap,
      avgAlh: avgAlh,
      numOfMotileTrajectories: motileTrajectories.length,
      numOfDetectedSperms: numOfDetectedSperms,
      numOfALevelSperms: numOfALevelSperms,
      numOfBLevelSperms: numOfBLevelSperms,
      numOfCLevelSperms: numOfCLevelSperms,
      imgData: null,
      imgDataOfSpermDetection: imgDataOfSpermDetection,
      createdTime: DateTime.now().millisecondsSinceEpoch ~/ 1000,
    );

    // Optionally export the captured video and processed images to the
    // photo gallery (best-effort; helpers swallow their own errors).
    if (config.saveCapturedVideoToPhotos) {
      await _saveVideoToPhotos(videoPath);
    }

    if (config.saveProceedImgsToPhotos) {
      await _saveImgToPhotos(outputPath);
      await _saveImgToPhotos(outputPath2);
    }
    print('----------识别到的精子数:${numOfDetectedSperms}');
    return result;
  }

  /// Saves the video at [videoPath] to the device photo gallery.
  ///
  /// Best-effort: success and failure are only logged, never rethrown.
  Future<void> _saveVideoToPhotos(String videoPath) async {
    try {
      final saveResult = await ImageGallerySaverPlus.saveFile(videoPath);
      final succeeded = saveResult['isSuccess'] == true;
      print(succeeded
          ? "Motility: Saved video to gallery"
          : "Motility: Failed to save video to gallery");
    } catch (e) {
      print("Motility: Failed to save video: $e");
    }
  }

  /// Saves the image at [imgPath] to the device photo gallery.
  ///
  /// Logs and returns early when the file is missing; any other failure is
  /// logged and never rethrown.
  Future<void> _saveImgToPhotos(String imgPath) async {
    try {
      // Guard: nothing to save if the file is absent on disk.
      if (!await File(imgPath).exists()) {
        print("Motility: Cannot load image from: $imgPath");
        return;
      }
      final saveResult = await ImageGallerySaverPlus.saveFile(imgPath);
      final succeeded = saveResult['isSuccess'] == true;
      print(succeeded
          ? "Motility: Saved image to gallery"
          : "Motility: Failed to save image to gallery");
    } catch (e) {
      print("Motility: Failed to save image: $e");
    }
  }
}