import 'dart:async';
import 'dart:io';
import 'dart:math';
import 'dart:typed_data';
import 'dart:ui';

import 'package:flutter/material.dart';
import 'package:flutter_audio_capture/flutter_audio_capture.dart';
import 'package:path_provider/path_provider.dart';
import 'package:just_audio/just_audio.dart';
import 'package:permission_handler/permission_handler.dart';

/// App entry point.
void main() {
  runApp(MyApp());
}

/// Root widget of the audio-capture demo.
///
/// Now takes an optional [key] (flutter_lints `use_key_in_widget_constructors`);
/// existing `MyApp()` call sites remain valid.
class MyApp extends StatefulWidget {
  const MyApp({Key? key}) : super(key: key);

  @override
  _MyAppState createState() => _MyAppState();
}

class _MyAppState extends State<MyApp> {
  // Native audio-capture plugin instance.
  FlutterAudioCapture _plugin = new FlutterAudioCapture();
  // Status text shown in the UI (initial value: "recording not started").
  String _audioInfo = "未开始录音";
  // Number of audio packets received from the plugin this session.
  int _dataCount = 0;
  // Largest absolute sample value seen during the current session.
  double _maxAmplitude = 0;
  // Recording start time, in milliseconds since epoch.
  int _startTime = 0;
  // Sample rate actually used by the device; null until the first packet arrives.
  double? _actualSampleRate;
  // Sample rate requested from the plugin (the device may choose another).
  int _requestedSampleRate = 44100;
  // Buffer size requested from the plugin (samples per callback).
  int _requestedBufferSize = 22050;
  int _wavSampleRate = 44100; // default WAV file sample rate

  // Player used to replay saved WAV files.
  final AudioPlayer _audioPlayer = AudioPlayer();

  // Raw audio chunks captured during the current recording session.
  List<Float32List> _audioChunks = [];
  // True between a successful start and the next stop.
  bool _isRecording = false;
  // Path of the last saved WAV file; null until a recording is saved.
  String? _wavFilePath;

  // Sample rates offered by the dropdown selectors.
  final List<int> _availableSampleRates = [8000, 16000, 22050, 44100, 48000];

  // Android audio source.
  int _androidAudioSource = 6; // ANDROID_AUDIOSRC_VOICERECOGNITION

  @override
  void initState() {
    super.initState();
    // Request mic permission and initialize capture. Fire-and-forget:
    // the async method reports its outcome via _audioInfo.
    _initializeWithPermissions();
  }

  @override
  void dispose() {
    // If the widget is torn down mid-recording, release the native
    // recorder; otherwise capture would keep running with no UI.
    if (_isRecording) {
      _isRecording = false;
      _plugin.stop();
    }
    _audioPlayer.dispose();
    super.dispose();
  }

  /// Requests microphone permission (mobile only) and initializes the
  /// capture plugin, reporting progress via [_audioInfo].
  ///
  /// All [setState] calls are now guarded with [mounted]: each one
  /// follows an `await`, and the widget may have been disposed during
  /// the async gap.
  Future<void> _initializeWithPermissions() async {
    // Ask for microphone access on mobile platforms first.
    if (Platform.isIOS || Platform.isAndroid) {
      final status = await Permission.microphone.request();
      if (status != PermissionStatus.granted) {
        if (mounted) {
          setState(() {
            _audioInfo = "错误: 麦克风权限被拒绝";
          });
        }
        return;
      }
    }

    // Initialize the capture plugin.
    try {
      final result = await _plugin.init();
      print("音频捕获初始化结果: $result");
      if (result != true && mounted) {
        setState(() {
          _audioInfo = "警告: 音频捕获初始化不完整";
        });
      }
    } catch (e) {
      print("音频捕获初始化错误: $e");
      if (mounted) {
        setState(() {
          _audioInfo = "错误: 音频捕获初始化失败 - $e";
        });
      }
    }
  }

  /// Resets session state and starts native audio capture.
  ///
  /// Fix: [setState] after the `await`s is guarded with [mounted], and
  /// the `_isRecording = false` rollback on failure happens outside
  /// [setState] so it runs even when the widget is already gone.
  Future<void> _startCapture() async {
    try {
      // Reset all per-session counters and buffers.
      _dataCount = 0;
      _maxAmplitude = 0;
      _startTime = DateTime.now().millisecondsSinceEpoch;
      _actualSampleRate = null;
      _audioChunks = []; // drop audio from any previous session
      _isRecording = true;
      _wavFilePath = null;

      // Make sure nothing is still playing.
      await _audioPlayer.stop();

      if (!mounted) return;
      setState(() {
        _audioInfo =
            "录音中... 请求采样率: ${_requestedSampleRate}Hz, 缓冲区: ${_requestedBufferSize}";
      });

      // iOS: use a large fixed buffer to keep per-callback overhead low.
      final bufferSize = Platform.isIOS ? 22050 : _requestedBufferSize;

      // Re-init on iOS to make sure the audio session is active.
      if (Platform.isIOS) {
        await _plugin.init();
      }

      await _plugin.start((dynamic audioData) => listener(audioData), onError,
          sampleRate: _requestedSampleRate, // requested; device decides the real rate
          bufferSize: bufferSize,
          androidAudioSource: _androidAudioSource);
    } catch (e) {
      print("启动录音失败: $e");
      _isRecording = false;
      if (mounted) {
        setState(() {
          _audioInfo = "错误: 无法启动录音\n$e";
        });
      }
    }
  }

  /// Stops capture, saves the collected chunks as a WAV file, and
  /// preloads the player with it.
  ///
  /// Fixes: [setState] after `await`s is guarded with [mounted], and
  /// [_wavFilePath] is assigned before the player preload so a preload
  /// failure no longer loses the saved path.
  Future<void> _stopCapture() async {
    if (!_isRecording) return;

    _isRecording = false;
    await _plugin.stop();

    int duration = DateTime.now().millisecondsSinceEpoch - _startTime;

    // Persist the recording, if we got any data.
    if (_audioChunks.isNotEmpty) {
      try {
        String path = await _saveAsWav();
        _wavFilePath = path;
        if (mounted) {
          setState(() {
            _audioInfo = "录音已停止\n"
                "总时长: ${(duration / 1000).toStringAsFixed(2)}秒\n"
                "采样率: ${_actualSampleRate?.toStringAsFixed(1) ?? _requestedSampleRate}Hz\n"
                "数据包数量: $_dataCount\n"
                "最大振幅: ${_maxAmplitude.toStringAsFixed(4)}\n"
                "保存为WAV文件: $path";
          });
        }

        // Preload the file so playback can start immediately.
        await _audioPlayer.setFilePath(path);
      } catch (e) {
        if (mounted) {
          setState(() {
            _audioInfo += "\n录音结束，但保存WAV文件失败: $e";
          });
        }
      }
    } else if (mounted) {
      setState(() {
        _audioInfo += "\n录音结束，但没有捕获到音频数据";
      });
    }
  }

  /// Plays the last saved WAV file at [speed] (1.0 = normal).
  ///
  /// Fixes two defects: (1) every call previously added a NEW
  /// `playerStateStream.listen` subscription — they accumulated across
  /// plays and fired duplicate "播放完成" messages; (2) just_audio's
  /// `play()` future only resolves when playback stops, so the
  /// "now playing" message appeared after playback finished. A one-shot
  /// `firstWhere` watcher (auto-cancels after the first match) now
  /// observes completion, and `play()` is deliberately not awaited.
  Future<void> _playWithSpeed(double speed) async {
    if (_wavFilePath == null) {
      setState(() {
        _audioInfo += "\n没有可播放的录音";
      });
      return;
    }

    try {
      await _audioPlayer.setFilePath(_wavFilePath!);
      await _audioPlayer.setSpeed(speed);
      // One-shot completion watcher; swallow errors from the stream
      // closing (e.g. player disposed before completion).
      unawaited(_audioPlayer.playerStateStream
          .firstWhere(
              (state) => state.processingState == ProcessingState.completed)
          .then((_) {
        if (mounted) {
          setState(() {
            _audioInfo += "\n播放完成";
          });
        }
      }).catchError((_) {}));
      if (!mounted) return;
      setState(() {
        _audioInfo += "\n正在以 ${speed}x 速度播放录音...";
      });
      unawaited(_audioPlayer.play());
    } catch (e) {
      if (mounted) {
        setState(() {
          _audioInfo += "\n播放录音失败: $e";
        });
      }
    }
  }

  /// Plays the last recording at normal (1.0x) speed.
  Future<void> _playRecording() => _playWithSpeed(1.0);

  /// Audio packet callback handed to `FlutterAudioCapture.start`.
  ///
  /// iOS delivers a Map holding "audioData" (sample list) and
  /// "actualSampleRate"; Android delivers a bare sample list, with the
  /// real rate read from the plugin. Both paths funnel into
  /// [_processSamples]; the ~70 duplicated lines per branch are gone.
  void listener(dynamic obj) {
    if (obj is Map) {
      // iOS payload. Checked as loose `Map` rather than
      // Map<String, dynamic>: platform channels typically deliver
      // Map<Object?, Object?>, which the typed check would reject.
      final audioDataRaw = obj["audioData"];
      final actualSampleRateRaw = obj["actualSampleRate"];

      if (audioDataRaw is List) {
        _dataCount++;
        if (_actualSampleRate == null && actualSampleRateRaw is num) {
          _noteActualSampleRate(actualSampleRateRaw.toDouble());
        }
        _processSamples(audioDataRaw);
      }
    } else if (obj is List) {
      // Android payload: the plugin exposes the real rate separately.
      _dataCount++;
      if (_actualSampleRate == null) {
        _noteActualSampleRate(_plugin.actualSampleRate);
      }
      _processSamples(obj);
    } else {
      print("收到未知类型数据: $obj (${obj.runtimeType})");
    }
  }

  // Records the device's real sample rate and aligns the WAV target
  // rate with it, so saving needs no resampling.
  void _noteActualSampleRate(double? rate) {
    _actualSampleRate = rate;
    print("实际采样率: $_actualSampleRate, 请求采样率: $_requestedSampleRate");
    if (mounted) {
      setState(() {
        _wavSampleRate = _actualSampleRate?.toInt() ?? _requestedSampleRate;
      });
    }
  }

  // Converts one raw packet to Float32List, stores it, tracks the peak
  // amplitude, and refreshes the status text every 10th packet.
  void _processSamples(List raw) {
    if (!_isRecording) return;

    final audioData = Float32List(raw.length);
    try {
      for (int i = 0; i < raw.length; i++) {
        final item = raw[i];
        // Any num converts directly; anything else falls back to
        // parsing its string form (0.0 if unparseable).
        audioData[i] = item is num
            ? item.toDouble()
            : double.tryParse(item.toString()) ?? 0.0;
      }
    } catch (e) {
      print(
          "转换音频数据时出错: $e, 数据类型: ${raw.runtimeType}, 第一个元素类型: ${raw.isNotEmpty ? raw.first.runtimeType : 'empty'}");
      return; // skip this packet if conversion failed
    }

    // Store the packet untouched; post-processing happens at save time.
    _audioChunks.add(audioData);

    // Track the loudest sample seen so far.
    double maxInPacket = 0;
    for (final sample in audioData) {
      final magnitude = sample.abs();
      if (magnitude > maxInPacket) {
        maxInPacket = magnitude;
      }
    }
    if (maxInPacket > _maxAmplitude) {
      _maxAmplitude = maxInPacket;
    }

    // Update the UI only every 10 packets to limit rebuild churn.
    if (_dataCount % 10 == 0 && mounted) {
      int duration = DateTime.now().millisecondsSinceEpoch - _startTime;
      setState(() {
        _audioInfo = "录音中...\n"
            "请求采样率: ${_requestedSampleRate}Hz\n"
            "实际采样率: ${_actualSampleRate?.toStringAsFixed(1) ?? '未知'}Hz\n"
            "WAV文件采样率: ${_wavSampleRate}Hz\n"
            "缓冲区大小: ${Platform.isIOS ? 22050 : _requestedBufferSize}\n"
            "已接收数据包: $_dataCount\n"
            "当前数据包大小: ${audioData.length}\n"
            "最大振幅: ${_maxAmplitude.toStringAsFixed(2)}\n"
            "已录制时长: ${(duration / 1000).toStringAsFixed(2)}秒";
      });
    }
  }

  /// Error callback handed to `FlutterAudioCapture.start`; logs the
  /// error and surfaces it in the status text.
  void onError(Object e) {
    print(e);
    setState(() => _audioInfo = "录音错误: $e");
  }

  /// Encodes all captured chunks as a 16-bit mono PCM WAV file in the
  /// app documents directory and returns its path.
  ///
  /// NOTE(review): the header uses the device's actual capture rate
  /// (falling back to the requested one), deliberately ignoring the
  /// user-selected `_wavSampleRate`, so no resampling is needed.
  Future<String> _saveAsWav() async {
    final int wavSampleRate =
        _actualSampleRate?.toInt() ?? _requestedSampleRate;
    final int originalSampleRate =
        _actualSampleRate?.toInt() ?? _requestedSampleRate;

    // Flatten every recorded chunk into one contiguous buffer.
    final int totalSamples =
        _audioChunks.fold<int>(0, (sum, chunk) => sum + chunk.length);
    final merged = Float32List(totalSamples);
    var cursor = 0;
    for (final chunk in _audioChunks) {
      merged.setAll(cursor, chunk);
      cursor += chunk.length;
    }

    print("使用设备原生采样率: $wavSampleRate Hz 保存WAV文件");

    // Scale float samples (-1.0..1.0) to signed 16-bit PCM.
    final pcm = Int16List(merged.length);
    for (var i = 0; i < merged.length; i++) {
      pcm[i] = (merged[i] * 32767).clamp(-32768, 32767).toInt();
    }

    // Header followed by the raw little-endian PCM payload.
    final wavBytes = BytesBuilder()
      ..add(_createWavHeader(pcm.length, wavSampleRate).buffer.asUint8List())
      ..add(pcm.buffer.asUint8List());

    // Write to a timestamped file in the documents directory.
    final directory = await getApplicationDocumentsDirectory();
    final timestamp = DateTime.now().millisecondsSinceEpoch;
    final filePath =
        '${directory.path}/recording_${timestamp}_${wavSampleRate}hz.wav';
    await File(filePath).writeAsBytes(wavBytes.toBytes());

    setState(() {
      _audioInfo += "\n原始采样率: ${originalSampleRate}Hz";
      _audioInfo += "\nWAV文件采样率: ${wavSampleRate}Hz";
      _audioInfo += "\n已保存为WAV文件";
    });

    return filePath;
  }

  /// Builds the 44-byte canonical WAV header for [totalSamples] of
  /// 16-bit mono PCM at [sampleRate] Hz (all multi-byte fields
  /// little-endian).
  ByteData _createWavHeader(int totalSamples, int sampleRate) {
    const bytesPerSample = 2; // 16-bit PCM
    final dataSize = totalSamples * bytesPerSample;
    final header = ByteData(44);

    // Writes an ASCII chunk tag at the given byte offset.
    void writeTag(int offset, String tag) {
      for (var i = 0; i < tag.length; i++) {
        header.setUint8(offset + i, tag.codeUnitAt(i));
      }
    }

    // RIFF chunk descriptor.
    writeTag(0, 'RIFF');
    header.setUint32(4, 36 + dataSize, Endian.little); // file size minus 8
    writeTag(8, 'WAVE');

    // fmt sub-chunk: uncompressed PCM, mono, 16-bit.
    writeTag(12, 'fmt ');
    header.setUint32(16, 16, Endian.little); // fmt chunk size
    header.setUint16(20, 1, Endian.little); // format: 1 = PCM
    header.setUint16(22, 1, Endian.little); // channels: mono
    header.setUint32(24, sampleRate, Endian.little);
    header.setUint32(
        28, sampleRate * bytesPerSample, Endian.little); // byte rate
    header.setUint16(32, bytesPerSample, Endian.little); // block align
    header.setUint16(34, 16, Endian.little); // bits per sample

    // data sub-chunk.
    writeTag(36, 'data');
    header.setUint32(40, dataSize, Endian.little);

    return header;
  }

  /// Row with two dropdowns: the capture sample rate to request, and
  /// the target WAV file sample rate.
  Widget _buildSampleRateSelector() {
    // Shared dropdown builder; [onPick] runs inside setState.
    DropdownButton<int> rateDropdown(int current, ValueChanged<int> onPick) {
      return DropdownButton<int>(
        value: current,
        onChanged: (int? picked) {
          if (picked != null) {
            setState(() => onPick(picked));
          }
        },
        items: [
          for (final rate in _availableSampleRates)
            DropdownMenuItem<int>(
              value: rate,
              child: Text("$rate Hz"),
            ),
        ],
      );
    }

    return Row(
      mainAxisAlignment: MainAxisAlignment.center,
      children: [
        Text("请求采样率: "),
        rateDropdown(_requestedSampleRate, (v) => _requestedSampleRate = v),
        SizedBox(width: 20),
        Text("WAV文件采样率: "),
        rateDropdown(_wavSampleRate, (v) => _wavSampleRate = v),
      ],
    );
  }

  @override
  Widget build(BuildContext context) {
    // Playback-speed button; disabled until a WAV file exists.
    ElevatedButton speedButton(double speed) {
      return ElevatedButton(
        onPressed: _wavFilePath != null ? () => _playWithSpeed(speed) : null,
        child: Text("${speed}x"),
      );
    }

    final speedRow = Row(
      mainAxisAlignment: MainAxisAlignment.center,
      children: [
        speedButton(0.5),
        SizedBox(width: 10),
        speedButton(1.0),
        SizedBox(width: 10),
        speedButton(2.0),
      ],
    );

    // Record / stop / play action buttons.
    final controlRow = Row(
      children: [
        Expanded(
          child: Center(
            child: FloatingActionButton(
              onPressed: _startCapture,
              backgroundColor: Colors.red,
              child: Icon(Icons.mic),
            ),
          ),
        ),
        Expanded(
          child: Center(
            child: FloatingActionButton(
              onPressed: _stopCapture,
              backgroundColor: Colors.grey,
              child: Icon(Icons.stop),
            ),
          ),
        ),
        Expanded(
          child: Center(
            child: FloatingActionButton(
              onPressed: _wavFilePath != null ? _playRecording : null,
              backgroundColor:
                  _wavFilePath != null ? Colors.blue : Colors.grey,
              child: Icon(Icons.play_arrow),
            ),
          ),
        ),
      ],
    );

    return MaterialApp(
      home: Scaffold(
        appBar: AppBar(
          title: const Text('Flutter Audio Capture Plugin'),
        ),
        body: Column(
          children: [
            _buildSampleRateSelector(),
            Expanded(
              child: Center(
                child: SingleChildScrollView(
                  padding: const EdgeInsets.all(16.0),
                  child: Text(
                    _audioInfo,
                    style: TextStyle(fontSize: 16),
                  ),
                ),
              ),
            ),
            speedRow,
            Expanded(child: controlRow),
          ],
        ),
      ),
    );
  }
}
