import 'dart:async';
import 'dart:io';
import 'dart:typed_data';
import 'dart:math' as math;

import 'package:flutter/services.dart';

/// Names of the platform channels shared with the native (Android/iOS)
/// implementations. These strings must match the native side exactly.
const AUDIO_CAPTURE_EVENT_CHANNEL_NAME = "ymd.dev/audio_capture_event_channel";
const AUDIO_CAPTURE_METHOD_CHANNEL_NAME =
    "ymd.dev/audio_capture_method_channel";

// Android recording-source IDs, mirroring android.media.MediaRecorder.AudioSource
// (DEFAULT=0, MIC=1, CAMCORDER=5, VOICE_RECOGNITION=6, VOICE_COMMUNICATION=7,
// UNPROCESSED=9). Passed through to the native side via `start(androidAudioSource:)`.
const ANDROID_AUDIOSRC_DEFAULT = 0;
const ANDROID_AUDIOSRC_MIC = 1;
const ANDROID_AUDIOSRC_CAMCORDER = 5;
const ANDROID_AUDIOSRC_VOICERECOGNITION = 6;
const ANDROID_AUDIOSRC_VOICECOMMUNICATION = 7;
const ANDROID_AUDIOSRC_UNPROCESSED = 9;

/// 日志工具类，用于控制调试信息的显示与隐藏
/// Lightweight logging helper that lets debug output be toggled globally.
class AudioLogger {
  /// Whether debug-level messages are printed. Errors print regardless.
  static bool enableLogging = false;

  /// Tag prepended to every log line.
  static const String _tag = "AudioCapture";

  /// Prints a debug message, but only while [enableLogging] is on.
  static void d(String message) {
    if (!enableLogging) return;
    print("[$_tag] $message");
  }

  /// Prints an error message unconditionally (even when logging is disabled).
  static void e(String message) {
    print("[$_tag][ERROR] $message");
  }
}

/// Captures microphone audio via platform channels and delivers it to a
/// Dart listener as [Float32List] buffers, with optional volume gain,
/// resampling, and callback-frequency throttling applied on the Dart side.
class FlutterAudioCapture {
  static const EventChannel _audioCaptureEventChannel =
      EventChannel(AUDIO_CAPTURE_EVENT_CHANNEL_NAME);

  // ignore: cancel_subscriptions
  StreamSubscription? _audioCaptureEventChannelSubscription;

  static const MethodChannel _audioCaptureMethodChannel =
      MethodChannel(AUDIO_CAPTURE_METHOD_CHANNEL_NAME);

  /// Sample rate reported by the platform. Only iOS events (Map payloads)
  /// carry one; on Android this stays null.
  double? _actualSampleRate;

  /// null = init() never called, false = init failed, true = ready.
  bool? _initialized;

  // Per-platform gain factors, set via the constructor.
  final double _androidVolumeGain;
  final double _iosVolumeGain;
  final bool _normalizeVolume;

  // Callback-frequency throttling.
  final int _targetCallbackFrequency; // desired callbacks per second
  final bool _controlCallbackFrequency; // whether throttling is active

  // Resampling.
  final int _targetSampleRate; // output sample rate when resampling
  final bool _enableResampling; // whether resampling is active (mainly iOS)

  // Logging.
  final bool _enableLogging;

  // Mutable state used by callback throttling.
  DateTime? _lastCallbackTime;
  List<Float32List> _bufferCache = [];
  Float32List? _lastSentBuffer;

  /// Creates a capture instance.
  ///
  /// [androidVolumeGain] / [iosVolumeGain] scale samples per platform when
  /// [normalizeVolume] is true (results are clamped to [-1.0, 1.0]).
  /// [targetCallbackFrequency] and [controlCallbackFrequency] throttle how
  /// often the listener passed to [start] fires, merging buffered audio in
  /// between. [targetSampleRate] / [enableResampling] linearly resample the
  /// stream. [enableLogging] toggles [AudioLogger] debug output globally.
  FlutterAudioCapture({
    double androidVolumeGain = 1.8,
    double iosVolumeGain = 1.0,
    bool normalizeVolume = true,
    int targetCallbackFrequency = 10,
    bool controlCallbackFrequency = false,
    int targetSampleRate = 16000,
    bool enableResampling = false,
    bool enableLogging = false,
  })  : _androidVolumeGain = androidVolumeGain,
        _iosVolumeGain = iosVolumeGain,
        _normalizeVolume = normalizeVolume,
        _targetCallbackFrequency = targetCallbackFrequency,
        _controlCallbackFrequency = controlCallbackFrequency,
        _targetSampleRate = targetSampleRate,
        _enableResampling = enableResampling,
        _enableLogging = enableLogging {
    // Flip the global log switch so helper logging matches this instance.
    AudioLogger.enableLogging = enableLogging;
  }

  /// Initializes the native side. Idempotent: subsequent calls return the
  /// cached result (a null return from the platform allows a retry).
  Future<bool?> init() async {
    // Only init once.
    if (_initialized != null) return _initialized;
    _initialized = await _audioCaptureMethodChannel.invokeMethod<bool>("init");
    return _initialized;
  }

  /// Starts listening to audio.
  ///
  /// Uses [sampleRate] and [bufferSize] for capturing audio.
  /// Uses [androidAudioSource] to determine recording type on Android.
  /// When [waitForFirstDataOnAndroid] is set, it waits up to
  /// [firstDataTimeout] for the first buffer to arrive and throws (after
  /// stopping) if it does not. Defaults to true on Android.
  /// [waitForFirstDataOnIOS] behaves the same on iOS but is known to be
  /// unreliable there and defaults to false.
  ///
  /// Throws if [init] was never called or reported failure. Does nothing if
  /// capture is already running.
  Future<void> start(void Function(Float32List) listener, Function onError,
      {int sampleRate = 44100,
      int bufferSize = 5000,
      int androidAudioSource = ANDROID_AUDIOSRC_DEFAULT,
      Duration firstDataTimeout = const Duration(seconds: 1),
      bool waitForFirstDataOnAndroid = true,
      bool waitForFirstDataOnIOS = false}) async {
    if (_initialized == null) {
      throw Exception("FlutterAudioCapture must be initialized before use");
    }

    if (_initialized == false) {
      throw Exception("FlutterAudioCapture failed to initialize");
    }

    // We are already listening.
    if (_audioCaptureEventChannelSubscription != null) return;

    // Reset throttling state from any previous session.
    _lastCallbackTime = null;
    _bufferCache = [];
    _lastSentBuffer = null;

    // When throttling on iOS, pre-size the native buffer so one buffer
    // roughly matches one callback interval. This uses the requested
    // sampleRate as an estimate; the real rate only becomes known once the
    // first event arrives, and the engine cannot be resized after creation.
    int actualBufferSize = bufferSize;
    if (Platform.isIOS && _controlCallbackFrequency) {
      actualBufferSize = (sampleRate / _targetCallbackFrequency).round();
      AudioLogger.d(
          "iOS预设缓冲区大小: $actualBufferSize (目标回调频率: $_targetCallbackFrequency Hz)");
    }

    // Open the native event stream.
    final stream = _audioCaptureEventChannel.receiveBroadcastStream({
      "sampleRate": sampleRate,
      "bufferSize": actualBufferSize,
      "audioSource": androidAudioSource,
    });

    _actualSampleRate = null;
    var audioStream = stream.map((event) {
      if (event is Map) {
        // iOS: events are maps carrying the real sample rate plus samples.
        _actualSampleRate = (event['actualSampleRate'] as num?)?.toDouble();

        // On the first event, log what the ideal buffer size would have
        // been for the real sample rate (informational only — the already
        // created AudioEngine cannot be resized).
        if (_controlCallbackFrequency &&
            _lastCallbackTime == null &&
            _actualSampleRate != null) {
          final idealBufferSize =
              (_actualSampleRate! / _targetCallbackFrequency).round();
          AudioLogger.d(
              "iOS实际采样率: $_actualSampleRate Hz, 理想缓冲区大小: $idealBufferSize");
        }

        return _processAudioData(_convertToFloat32List(event['audioData']),
            isAndroid: false);
      }
      // Android: events are raw sample lists with no metadata.
      return _processAudioData(_convertToFloat32List(event), isAndroid: true);
    });

    // Do we need to wait for first data?
    final waitForFirstData =
        (Platform.isAndroid && waitForFirstDataOnAndroid) ||
            (Platform.isIOS && waitForFirstDataOnIOS);

    // Gate: drop events until the first-data handshake below has finished,
    // so the listener never sees data from a start() that ultimately fails.
    // (A plain flag replaces the previous Completer — completeError() on a
    // future nobody awaited raised an unhandled async error.)
    var started = false;
    _audioCaptureEventChannelSubscription = audioStream
        .skipWhile((element) => !started)
        .where((data) => data.isNotEmpty) // drop throttled empty buffers
        .listen(listener, onError: onError);
    if (waitForFirstData) {
      try {
        // BUG FIX: the old condition `(_actualSampleRate ?? 0) > 10` could
        // never hold on Android (only iOS events set _actualSampleRate), so
        // Android always timed out here. Accept any non-empty buffer, or a
        // reported iOS sample rate, as proof that data is flowing.
        // NOTE(review): this second listener causes the map() side effects
        // (buffer caching) to run twice per event during the handshake on
        // the broadcast stream — harmless today since gated data is
        // discarded, but worth confirming against the native side.
        await audioStream
            .firstWhere(
                (data) => data.isNotEmpty || (_actualSampleRate ?? 0) > 10)
            .timeout(firstDataTimeout);
      } catch (e) {
        // Timed out or errored: tear down and surface the error.
        await stop();
        rethrow;
      }
    }
    started = true;
  }

  /// Shared post-processing pipeline for both platforms: optional volume
  /// normalization, optional resampling, optional callback throttling.
  Float32List _processAudioData(Float32List audioData,
      {required bool isAndroid}) {
    final volumeNormalizedData = _normalizeVolume
        ? _normalizeAudioVolume(audioData, isAndroid: isAndroid)
        : audioData;

    // Resampling needs a known source rate; on Android _actualSampleRate is
    // never reported, so this branch only ever runs on iOS.
    final processedData = (_enableResampling &&
            _actualSampleRate != null &&
            _actualSampleRate != _targetSampleRate)
        ? _resampleAudio(
            volumeNormalizedData, _actualSampleRate!, _targetSampleRate)
        : volumeNormalizedData;

    if (_controlCallbackFrequency) {
      return _controlCallback(processedData);
    }
    return processedData;
  }

  /// Throttles callbacks to roughly [_targetCallbackFrequency] per second.
  ///
  /// Incoming buffers are cached; once the minimum interval has elapsed the
  /// cached buffers are merged and returned. Between intervals an empty
  /// buffer is returned (callers filter those out).
  Float32List _controlCallback(Float32List audioData) {
    if (audioData.isEmpty) return audioData;

    final now = DateTime.now();
    final minInterval =
        Duration(milliseconds: (1000 / _targetCallbackFrequency).round());

    // Cache the incoming buffer.
    _bufferCache.add(audioData);

    // First callback, or the target interval has elapsed since the last one.
    if (_lastCallbackTime == null ||
        now.difference(_lastCallbackTime!) >= minInterval) {
      _lastCallbackTime = now;

      if (_bufferCache.length > 1) {
        // Merge all cached buffers into one contiguous Float32List.
        int totalLength = 0;
        for (var buffer in _bufferCache) {
          totalLength += buffer.length;
        }

        final mergedData = Float32List(totalLength);
        int offset = 0;
        for (var buffer in _bufferCache) {
          mergedData.setRange(offset, offset + buffer.length, buffer);
          offset += buffer.length;
        }

        _bufferCache = [];
        _lastSentBuffer = mergedData;
        return mergedData;
      } else if (_bufferCache.length == 1) {
        // Single cached buffer: return it directly without copying.
        final data = _bufferCache[0];
        _bufferCache = [];
        _lastSentBuffer = data;
        return data;
      }
    }

    // Interval not yet elapsed: signal "nothing to emit" with an empty buffer.
    return Float32List(0);
  }

  /// Linearly resamples [audioData] from [sourceSampleRate] to
  /// [targetSampleRate]. Returns the input unchanged when the rates already
  /// match or the input is empty.
  Float32List _resampleAudio(
      Float32List audioData, double sourceSampleRate, int targetSampleRate) {
    if (audioData.isEmpty) return audioData;
    if (sourceSampleRate == targetSampleRate) return audioData;

    // Sanity-check the rates (debug builds only).
    assert(sourceSampleRate > 0, "源采样率必须大于0");
    assert(targetSampleRate > 0, "目标采样率必须大于0");

    AudioLogger.d("音频重采样: 从 $sourceSampleRate Hz 到 $targetSampleRate Hz");
    AudioLogger.d("输入音频长度: ${audioData.length} 样本");

    // ratio > 1 means upsampling, < 1 means downsampling.
    final ratio = targetSampleRate / sourceSampleRate;
    final targetLength = (audioData.length * ratio).round();

    AudioLogger.d("重采样比率: $ratio, 输出音频长度: $targetLength 样本");

    // Guard the degenerate case before allocating the output.
    if (targetLength <= 0) {
      AudioLogger.e("警告: 重采样后音频长度为0，返回空数组");
      return Float32List(0);
    }

    if (ratio > 10 || ratio < 0.1) {
      AudioLogger.e("警告: 重采样比率异常 ($ratio)，可能导致音质严重失真");
    }

    final result = Float32List(targetLength);

    // Linear interpolation between neighbouring source samples.
    for (int i = 0; i < targetLength; i++) {
      final srcPos = i / ratio;

      // Past the last sample pair: clamp to the final sample.
      if (srcPos >= audioData.length - 1) {
        result[i] = audioData[audioData.length - 1];
        continue;
      }

      final srcPosInt = srcPos.floor();
      final fraction = srcPos - srcPosInt;

      // Defensive bounds check; out-of-range positions become silence.
      if (srcPosInt < 0 || srcPosInt >= audioData.length - 1) {
        result[i] = 0.0;
        continue;
      }

      final sample1 = audioData[srcPosInt];
      final sample2 = audioData[srcPosInt + 1];
      result[i] = sample1 + fraction * (sample2 - sample1);
    }

    // Report the peak amplitude to help diagnose quiet output.
    double maxAmplitude = 0;
    for (int i = 0; i < result.length; i++) {
      final abs = result[i].abs();
      if (abs > maxAmplitude) maxAmplitude = abs;
    }

    AudioLogger.d("重采样后最大振幅: $maxAmplitude");

    if (maxAmplitude < 0.01) {
      AudioLogger.e("警告: 重采样后音频振幅很小，可能听不清");
    }

    return result;
  }

  /// Converts an arbitrary platform payload into a [Float32List].
  ///
  /// Numeric list entries are converted; non-numeric entries become 0.0.
  /// Unknown payload shapes yield an empty list rather than throwing.
  Float32List _convertToFloat32List(dynamic data) {
    if (data is Float32List) return data;
    if (data is List) {
      final result = Float32List(data.length);
      for (int i = 0; i < data.length; i++) {
        final sample = data[i];
        // `is num` covers both int and double payloads.
        result[i] = sample is num ? sample.toDouble() : 0.0;
      }
      return result;
    }
    return Float32List(0); // fallback for unexpected payload types
  }

  /// Applies the platform-specific gain factor and clamps samples to the
  /// valid [-1.0, 1.0] range, so iOS and Android recordings sound alike.
  Float32List _normalizeAudioVolume(Float32List audioData,
      {required bool isAndroid}) {
    if (audioData.isEmpty) return audioData;

    // Copy into a fresh buffer so the caller's data is never mutated.
    final normalizedData = Float32List(audioData.length);

    final volumeGain = isAndroid ? _androidVolumeGain : _iosVolumeGain;

    for (int i = 0; i < audioData.length; i++) {
      normalizedData[i] = audioData[i] * volumeGain;

      // Clamp to the valid PCM float range.
      if (normalizedData[i] > 1.0) {
        normalizedData[i] = 1.0;
      } else if (normalizedData[i] < -1.0) {
        normalizedData[i] = -1.0;
      }
    }

    return normalizedData;
  }

  /// Stops capturing and clears throttling state. Safe to call when not
  /// running.
  Future<void> stop() async {
    final subscription = _audioCaptureEventChannelSubscription;
    if (subscription == null) return;
    // Clear the field before awaiting so a concurrent start() sees us as
    // already stopped.
    _audioCaptureEventChannelSubscription = null;
    await subscription.cancel();

    _bufferCache = [];
    _lastSentBuffer = null;
    _lastCallbackTime = null;
  }

  /// Sample rate reported by the platform, or null if none was reported
  /// (always null on Android).
  double? get actualSampleRate => _actualSampleRate;

  /// The effective output sample rate after any resampling.
  int get outputSampleRate =>
      _enableResampling ? _targetSampleRate : (_actualSampleRate?.toInt() ?? 0);

  /// The most recent buffer emitted by the throttler, if any.
  Float32List? get lastSentBuffer => _lastSentBuffer;

  /// Enables or disables debug logging at runtime.
  void setLoggingEnabled(bool enabled) {
    AudioLogger.enableLogging = enabled;
  }

  /// Whether debug logging is currently enabled.
  bool get isLoggingEnabled => AudioLogger.enableLogging;
}

/// Convenience accessors for reading typed values out of a [Map].
extension MapUtil on Map {
  /// Returns the value stored under [key], cast to [T].
  ///
  /// Throws if the key is absent or the stored value is null.
  T get<T>(String key) => this[key]!;

  /// Returns the value stored under [key] cast to [T], or null when the key
  /// is absent.
  T? getOrNull<T>(String key) => this[key];
}
