package com.smart.translator.ui.voice;

import android.Manifest;
import android.app.Application;
import android.content.ClipData;
import android.content.ClipboardManager;
import android.content.Context;
import android.content.Intent;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.os.Build;
import android.os.Handler;
import android.os.Looper;
import android.speech.tts.TextToSpeech;
import android.widget.Toast;
import androidx.annotation.NonNull;
import androidx.lifecycle.AndroidViewModel;
import androidx.lifecycle.LiveData;
import androidx.lifecycle.MutableLiveData;
import com.smart.translator.data.model.Language;
import com.smart.translator.data.repository.TranslationRepository;
//import com.smart.translator.service.AudioCaptureService;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.WebSocket;
import okhttp3.WebSocketListener;
import android.content.pm.PackageManager;
import java.util.ArrayList;
import android.util.Log;

public class VoiceViewModel extends AndroidViewModel {
    private static final String TAG = "VoiceViewModel";
    private static final String AUDIO_TAG = "VoiceViewModel_Audio";
    private static final String CALL_TAG = "VoiceViewModel_Call";
    private static final String WS_TAG = "VoiceViewModel_WebSocket";
    // Reconnection policy: up to 3 delayed retries while a recording is active.
    private static final int MAX_RECONNECT_ATTEMPTS = 3;
    private int reconnectAttempts = 0;
    private boolean isReconnecting = false;
    // Streaming-session state: protocol is start frame (0) -> data frames (1) -> end frame (2).
    private boolean isStartFrameSent = false;
    private boolean isEndFrameSent = false;
    private boolean isWebSocketReady = false;
    // Audio frames captured before the socket is ready are buffered here
    // (guarded by audioLock) and flushed in the listener's onOpen().
    private List<byte[]> pendingAudioDataList = new ArrayList<>();
    private final Object audioLock = new Object();
    // Set while a telephony state transition is being processed; audio is
    // buffered rather than sent during the transition window.
    private boolean isCallStateChanging = false;
    private boolean isWaitingForEndFrame = false;
    
    // Supported language pairs; index positions are what the LiveData selections refer to.
    private final List<Language> languages = Arrays.asList(
        new Language("zh", "中文"),
        new Language("en", "英语"),
        new Language("ja", "日语"),
        new Language("ko", "韩语"),
        new Language("fr", "法语"),
        new Language("de", "德语"),
        new Language("es", "西班牙语"),
        new Language("ru", "俄语")
    );
    // Defaults: source = index 0 (zh), target = index 1 (en).
    private final MutableLiveData<Integer> sourceLangIndex = new MutableLiveData<>(0);
    private final MutableLiveData<Integer> targetLangIndex = new MutableLiveData<>(1);
    private final MutableLiveData<Boolean> isRecording = new MutableLiveData<>(false);
    private final MutableLiveData<String> voiceText = new MutableLiveData<>("");
    private final MutableLiveData<String> translatedText = new MutableLiveData<>("");
    private final MutableLiveData<Boolean> isLoading = new MutableLiveData<>(false);
    private final MutableLiveData<String> error = new MutableLiveData<>("");
    private AudioRecord audioRecord;
    // Plain flag mirrors the isRecording LiveData for use on the capture thread.
    private boolean isRecordingFlag = false;
    private WebSocket webSocket;
    private Handler mainHandler = new Handler(Looper.getMainLooper());
    private final TranslationRepository translationRepository;
    private TextToSpeech tts;
    // Capture format: 8 kHz mono 16-bit PCM, buffer at least 2 KiB.
    private static final int SAMPLE_RATE = 8000;
    private static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_MONO;
    private static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
    // Math.max also shields against getMinBufferSize error codes (negative values).
    private static final int BUFFER_SIZE = Math.max(2048, AudioRecord.getMinBufferSize(SAMPLE_RATE, CHANNEL_CONFIG, AUDIO_FORMAT));
    private static final String WS_PATH = "/ws/speech_v2";
    
    // Reads the configured server address from shared preferences, falling
    // back to the default host if the preference is missing or lookup fails.
    private String getServerUrl() {
        final String defaultHost = "qixin.yeshan.fun";
        try {
            android.content.SharedPreferences prefs =
                getApplication().getSharedPreferences("app_config", Context.MODE_PRIVATE);
            return prefs.getString("server_url", defaultHost);
        } catch (Exception e) {
            Log.e(TAG, "获取服务器地址失败", e);
            return defaultHost;
        }
    }
    
    // Assembles the full ws:// endpoint from the configured host and the fixed path.
    private String buildWebSocketUrl() {
        return new StringBuilder("ws://")
            .append(getServerUrl())
            .append(WS_PATH)
            .toString();
    }
    private android.media.AudioManager audioManager;
    private android.telephony.PhoneStateListener phoneStateListener;
    private android.telephony.TelephonyManager telephonyManager;
    // True between CALL_STATE_OFFHOOK and CALL_STATE_IDLE.
    private boolean isInCall = false;
    // Broadcast contract used by AudioDataReceiver to feed external audio into this ViewModel.
    private static final String ACTION_AUDIO_DATA = "com.smart.translator.ui.voice.ACTION_AUDIO_DATA";
    private static final String EXTRA_AUDIO_DATA = "com.smart.translator.ui.voice.EXTRA_AUDIO_DATA";
    private static final String EXTRA_DATA_LENGTH = "com.smart.translator.ui.voice.EXTRA_DATA_LENGTH";

    /**
     * Receives ACTION_AUDIO_DATA broadcasts and forwards the PCM payload to the
     * owning ViewModel's sendAudioFrame(). Events arriving before a ViewModel is
     * attached, or with an empty payload, are silently dropped.
     */
    public static class AudioDataReceiver extends android.content.BroadcastReceiver {
        private VoiceViewModel viewModel;

        // No-arg constructor so the framework can instantiate the receiver itself.
        public AudioDataReceiver() {
            this(null);
        }

        public AudioDataReceiver(VoiceViewModel viewModel) {
            this.viewModel = viewModel;
        }

        public void setViewModel(VoiceViewModel viewModel) {
            this.viewModel = viewModel;
        }

        @Override
        public void onReceive(android.content.Context context, android.content.Intent intent) {
            // Ignore anything that is not our audio broadcast or arrives before wiring.
            if (viewModel == null || !ACTION_AUDIO_DATA.equals(intent.getAction())) {
                return;
            }
            byte[] payload = intent.getByteArrayExtra(EXTRA_AUDIO_DATA);
            int payloadLength = intent.getIntExtra(EXTRA_DATA_LENGTH, 0);
            if (payload == null || payloadLength <= 0) {
                return;
            }
            viewModel.sendAudioFrame(payload, payloadLength);
        }
    }

    // Receiver instance wired to this ViewModel in the constructor.
    // NOTE(review): registration is not visible in this file — presumably done
    // by the hosting component; confirm before relying on unregisterReceiver().
    private AudioDataReceiver audioDataReceiver;

    /**
     * Wires up the translation repository, audio/telephony system services,
     * the TTS engine (defaulting to Chinese), and the audio broadcast receiver.
     *
     * @param application application context used to look up system services
     */
    public VoiceViewModel(@NonNull Application application) {
        super(application);
        translationRepository = new TranslationRepository();
        audioManager = (android.media.AudioManager) application.getSystemService(Context.AUDIO_SERVICE);
        telephonyManager = (android.telephony.TelephonyManager) application.getSystemService(Context.TELEPHONY_SERVICE);
        
        tts = new TextToSpeech(application, status -> {
            if (status != TextToSpeech.ERROR) {
                tts.setLanguage(Locale.CHINESE);
            }
        });

        audioDataReceiver = new AudioDataReceiver(this);
    }

    // ---- Read-only accessors exposing state to the UI layer ----
    public List<Language> getLanguages() { return languages; }
    public LiveData<Integer> getSourceLangIndex() { return sourceLangIndex; }
    public LiveData<Integer> getTargetLangIndex() { return targetLangIndex; }
    public LiveData<Boolean> getIsRecording() { return isRecording; }
    public LiveData<String> getVoiceText() { return voiceText; }
    public LiveData<String> getTranslatedText() { return translatedText; }
    public LiveData<Boolean> getIsLoading() { return isLoading; }
    public LiveData<String> getError() { return error; }

    public void setSourceLangIndex(int idx) { sourceLangIndex.setValue(idx); }
    public void setTargetLangIndex(int idx) { targetLangIndex.setValue(idx); }

    // Swaps the source and target language selections; no-op until both are set.
    public void switchLanguages() {
        Integer source = sourceLangIndex.getValue();
        Integer target = targetLangIndex.getValue();
        if (source == null || target == null) {
            return;
        }
        sourceLangIndex.setValue(target);
        targetLangIndex.setValue(source);
    }

    /**
     * Opens a WebSocket to the speech endpoint and installs a listener that
     * drives the streaming protocol:
     * onOpen    — reset session flags, send the start frame (frame_type 0), then
     *             flush any audio buffered while the socket was connecting;
     * onMessage — append "result" texts to voiceText on the main thread;
     * onFailure — while recording, retry up to MAX_RECONNECT_ATTEMPTS times with
     *             linearly increasing delay; otherwise surface the error and stop;
     * onClosed  — mark the socket as not ready.
     *
     * NOTE(review): a fresh OkHttpClient (with its own connection/thread pools)
     * is built on every call, including each reconnect attempt; OkHttp's docs
     * recommend sharing one client — consider hoisting to a field.
     */
    private void initWebSocket() {
        android.util.Log.d(WS_TAG, "初始化WebSocket连接");
        OkHttpClient client = new OkHttpClient.Builder()
            .pingInterval(30, java.util.concurrent.TimeUnit.SECONDS)
            .build();
        Request request = new Request.Builder().url(buildWebSocketUrl()).build();
        webSocket = client.newWebSocket(request, new WebSocketListener() {
            @Override
            public void onOpen(WebSocket webSocket, okhttp3.Response response) {
                android.util.Log.d(WS_TAG, "WebSocket连接已打开");
                reconnectAttempts = 0;
                isReconnecting = false;
                isWebSocketReady = true;
                isStartFrameSent = false;
                isEndFrameSent = false;
                
                // Send the start frame that opens a streaming session.
                try {
                    JSONObject startFrame = new JSONObject();
                    startFrame.put("type", "audio");
                    startFrame.put("frame_type", 0);
                    startFrame.put("data", "");
                    webSocket.send(startFrame.toString());
                    isStartFrameSent = true;
                    android.util.Log.d(WS_TAG, "发送开始帧");

                    // Flush audio frames buffered while the socket was connecting.
                    synchronized (audioLock) {
                        if (!pendingAudioDataList.isEmpty()) {
                            android.util.Log.d(WS_TAG, "开始发送缓存的音频数据，共 " + pendingAudioDataList.size() + " 帧");
                            for (byte[] audioData : pendingAudioDataList) {
                                sendAudioFrameInternal(audioData, audioData.length);
                            }
                            pendingAudioDataList.clear();
                            android.util.Log.d(WS_TAG, "缓存的音频数据发送完成");
                        }
                    }
                } catch (JSONException e) {
                    android.util.Log.e(WS_TAG, "发送开始帧失败", e);
                }
            }

            @Override
            public void onMessage(WebSocket webSocket, String text) {
                android.util.Log.d(WS_TAG, "收到服务器消息: " + text);
                try {
                    JSONObject json = new JSONObject(text);
                    if (json.has("type") && json.getString("type").equals("result") && json.has("text")) {
                        String recognizedText = json.getString("text");
                        // LiveData must be updated from the main thread; OkHttp
                        // delivers messages on its own thread.
                        mainHandler.post(() -> {
                            String currentText = voiceText.getValue() != null ? voiceText.getValue() : "";
                            voiceText.setValue(currentText + recognizedText);
                        });
                    }
                } catch (JSONException e) {
                    android.util.Log.e(WS_TAG, "解析服务器消息失败", e);
                }
            }

            @Override
            public void onFailure(WebSocket webSocket, Throwable t, okhttp3.Response response) {
                android.util.Log.e(WS_TAG, "WebSocket连接失败", t);
                isWebSocketReady = false;
                if (isRecordingFlag && !isReconnecting && reconnectAttempts < MAX_RECONNECT_ATTEMPTS) {
                    isReconnecting = true;
                    reconnectAttempts++;
                    android.util.Log.d(WS_TAG, "尝试重新连接 WebSocket，第 " + reconnectAttempts + " 次");
                    // Linear backoff: 1s, 2s, 3s.
                    mainHandler.postDelayed(() -> {
                        initWebSocket();
                        isReconnecting = false;
                    }, 1000 * reconnectAttempts);
                } else {
                    mainHandler.post(() -> {
                        error.setValue("WebSocket连接失败: " + t.getMessage());
                        isRecording.setValue(false);
                    });
                }
            }

            @Override
            public void onClosed(WebSocket webSocket, int code, String reason) {
                android.util.Log.d(WS_TAG, "WebSocket连接已关闭: " + reason);
                isWebSocketReady = false;
            }
        });
    }

    /**
     * (Re)creates {@link #audioRecord}, trying the requested {@code audioSource}
     * first and then falling back through VOICE_COMMUNICATION, MIC and
     * VOICE_CALL. Fixes two defects in the previous version: the
     * {@code audioSource} argument was ignored (VOICE_COMMUNICATION was always
     * tried first regardless), and the success log reported the requested
     * source rather than the one actually opened. On total failure
     * {@code audioRecord} is left {@code null}.
     *
     * @param audioSource preferred MediaRecorder.AudioSource constant
     */
    private void initAudioRecord(int audioSource) {
        android.util.Log.d(AUDIO_TAG, String.format("初始化AudioRecord - 音频源: %s, 采样率: %dHz, 声道: %s, 位深: %dbit",
            sourceName(audioSource),
            SAMPLE_RATE,
            CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_MONO ? "单声道" : "立体声",
            AUDIO_FORMAT == AudioFormat.ENCODING_PCM_16BIT ? 16 : 8));

        // Validate the fixed audio parameters before constructing anything.
        int minBufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, CHANNEL_CONFIG, AUDIO_FORMAT);
        if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
            android.util.Log.e(AUDIO_TAG, "音频参数不支持");
            return;
        }

        android.util.Log.d(AUDIO_TAG, String.format("最小缓冲区大小: %d, 实际缓冲区大小: %d", minBufferSize, BUFFER_SIZE));

        // Release any previous recorder before opening a new one.
        if (audioRecord != null) {
            audioRecord.release();
            audioRecord = null;
        }

        // Requested source first, then the standard fallbacks (deduplicated).
        List<Integer> candidates = new ArrayList<>();
        candidates.add(audioSource);
        int[] fallbacks = {
            MediaRecorder.AudioSource.VOICE_COMMUNICATION,
            MediaRecorder.AudioSource.MIC,
            MediaRecorder.AudioSource.VOICE_CALL
        };
        for (int fallback : fallbacks) {
            if (!candidates.contains(fallback)) {
                candidates.add(fallback);
            }
        }

        for (int source : candidates) {
            AudioRecord attempt = new AudioRecord(source, SAMPLE_RATE, CHANNEL_CONFIG, AUDIO_FORMAT, BUFFER_SIZE);
            if (attempt.getState() == AudioRecord.STATE_INITIALIZED) {
                audioRecord = attempt;
                android.util.Log.d(AUDIO_TAG, String.format("AudioRecord初始化成功 - 音频源: %s, 采样率: %dHz, 声道: %s, 位深: %dbit",
                    sourceName(source),
                    audioRecord.getSampleRate(),
                    audioRecord.getChannelConfiguration() == AudioFormat.CHANNEL_IN_MONO ? "单声道" : "立体声",
                    audioRecord.getAudioFormat() == AudioFormat.ENCODING_PCM_16BIT ? 16 : 8));
                return;
            }
            // This source failed: release the half-built recorder, try the next.
            android.util.Log.e(AUDIO_TAG, sourceName(source) + "初始化失败");
            attempt.release();
        }

        android.util.Log.e(AUDIO_TAG, "AudioRecord初始化失败");
    }

    // Human-readable name of an audio source constant (log output only).
    private static String sourceName(int audioSource) {
        if (audioSource == MediaRecorder.AudioSource.VOICE_COMMUNICATION) return "VOICE_COMMUNICATION";
        if (audioSource == MediaRecorder.AudioSource.MIC) return "MIC";
        if (audioSource == MediaRecorder.AudioSource.VOICE_CALL) return "VOICE_CALL";
        return "SOURCE_" + audioSource;
    }

    /**
     * Entry point for telephony state changes. If a streaming session is live,
     * the end frame is sent first and the actual transition is deferred ~1s so
     * the server sees a clean session boundary; audio captured meanwhile is
     * buffered (see sendAudioFrame).
     *
     * @param state one of TelephonyManager.CALL_STATE_*
     */
    private void handleCallStateChange(int state) {
        android.util.Log.d(CALL_TAG, "处理通话状态变化: " + state);
        isCallStateChanging = true;
        
        // If currently streaming, terminate the session cleanly before switching.
        if (isRecordingFlag && isWebSocketReady && isStartFrameSent && !isEndFrameSent) {
            android.util.Log.d(WS_TAG, "通话状态切换，发送结束帧");
            sendEndFrame();
            isWaitingForEndFrame = true;
            
            // Give the end frame time to go out before reconfiguring audio.
            mainHandler.postDelayed(() -> {
                isWaitingForEndFrame = false;
                processCallStateChange(state);
            }, 1000);
        } else {
            processCallStateChange(state);
        }
    }

    /**
     * Applies the audio/WebSocket reconfiguration required for each telephony
     * state. RINGING pauses capture; OFFHOOK and IDLE both re-route audio and
     * restart the capture pipeline — those two branches previously duplicated
     * ~30 lines of identical code, now extracted into a shared helper.
     *
     * @param state one of TelephonyManager.CALL_STATE_*
     */
    private void processCallStateChange(int state) {
        switch (state) {
            case android.telephony.TelephonyManager.CALL_STATE_RINGING:
                android.util.Log.d(CALL_TAG, "电话响铃中");
                if (isRecordingFlag && audioRecord != null) {
                    audioRecord.stop();
                }
                break;

            case android.telephony.TelephonyManager.CALL_STATE_OFFHOOK:
                android.util.Log.d(CALL_TAG, "电话已接通");
                isInCall = true;
                if (isRecordingFlag) {
                    android.util.Log.d(AUDIO_TAG, "通话中重新配置音频");
                    reconfigureAudioPipeline();
                }
                break;

            case android.telephony.TelephonyManager.CALL_STATE_IDLE:
                android.util.Log.d(CALL_TAG, "电话已结束");
                isInCall = false;
                if (isRecordingFlag) {
                    android.util.Log.d(AUDIO_TAG, "通话结束后重新配置音频");
                    reconfigureAudioPipeline();
                }
                break;
        }
        // NOTE(review): the postDelayed work scheduled above may still be in
        // flight when this flag clears; confirm senders tolerate that window.
        isCallStateChanging = false;
    }

    // Routes audio to the speaker, tears down the current AudioRecord, then
    // re-opens the WebSocket and recorder after short settle delays.
    private void reconfigureAudioPipeline() {
        audioManager.setMode(android.media.AudioManager.MODE_IN_COMMUNICATION);
        audioManager.setSpeakerphoneOn(true);
        audioManager.setBluetoothScoOn(false);
        audioManager.setMicrophoneMute(false);

        if (audioRecord != null) {
            audioRecord.stop();
            audioRecord.release();
        }

        // Let the audio system settle, connect the WebSocket first, then bring
        // the recorder back once the connection has had time to establish.
        mainHandler.postDelayed(() -> {
            initWebSocket();
            mainHandler.postDelayed(() -> {
                initAudioRecord(MediaRecorder.AudioSource.VOICE_RECOGNITION);
                if (audioRecord != null && audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
                    audioRecord.startRecording();
                }
            }, 1000);
        }, 500);
    }

    /**
     * Queues or sends one PCM frame. Frames are buffered (as copies) while a
     * call-state transition is in progress or the connection is not yet ready,
     * and flushed later by the listener's onOpen. The two previously duplicated
     * buffering branches are merged.
     *
     * @param buffer reusable capture buffer — copied before caching
     * @param read   number of valid bytes in {@code buffer}
     */
    private void sendAudioFrame(byte[] buffer, int read) {
        boolean transitioning = isCallStateChanging || isWaitingForEndFrame;
        boolean channelReady = isWebSocketReady && isStartFrameSent;
        if (transitioning || !channelReady) {
            android.util.Log.d(WS_TAG, transitioning
                ? "通话状态正在切换或等待结束帧发送完成，缓存音频数据"
                : "WebSocket未就绪或开始帧未发送，缓存音频数据");
            synchronized (audioLock) {
                // Copy: the capture thread reuses the same buffer for the next read().
                pendingAudioDataList.add(Arrays.copyOf(buffer, read));
            }
            return;
        }

        sendAudioFrameInternal(buffer, read);
    }

    /**
     * Base64-encodes {@code read} bytes of PCM and sends them as a frame_type=1
     * message. Now guards against a null socket (previously an NPE risk when
     * called before the first connection) and logs when OkHttp refuses the
     * message (socket closing or outgoing queue full).
     *
     * @param buffer PCM bytes to send
     * @param read   number of valid bytes in {@code buffer}
     */
    private void sendAudioFrameInternal(byte[] buffer, int read) {
        if (webSocket == null) {
            android.util.Log.w(WS_TAG, "WebSocket实例为空，丢弃音频数据帧");
            return;
        }
        try {
            String base64 = android.util.Base64.encodeToString(buffer, 0, read, android.util.Base64.NO_WRAP);
            JSONObject frame = new JSONObject();
            frame.put("type", "audio");
            frame.put("frame_type", 1);
            frame.put("data", base64);
            if (webSocket.send(frame.toString())) {
                android.util.Log.d(WS_TAG, "发送音频数据帧");
            } else {
                // send() returns false when the socket is closed/closing or the
                // 16 MiB outgoing queue is exceeded.
                android.util.Log.w(WS_TAG, "音频数据帧入队失败");
            }
        } catch (JSONException e) {
            android.util.Log.e(WS_TAG, "发送音频数据帧失败", e);
        }
    }

    // Sends the frame_type=2 terminator at most once per session; refuses to
    // send before the start frame, or after an end frame has already gone out.
    private void sendEndFrame() {
        boolean canSend = isWebSocketReady && isStartFrameSent && !isEndFrameSent;
        if (!canSend) {
            android.util.Log.d(WS_TAG, "无法发送结束帧：WebSocket未就绪或开始帧未发送或已发送结束帧");
            return;
        }

        try {
            JSONObject terminator = new JSONObject();
            terminator.put("type", "audio");
            terminator.put("frame_type", 2);
            terminator.put("data", "");
            webSocket.send(terminator.toString());
            isEndFrameSent = true;
            android.util.Log.d(WS_TAG, "发送结束帧");
        } catch (JSONException e) {
            android.util.Log.e(WS_TAG, "发送结束帧失败", e);
        }
    }

    /**
     * Checks that the RECORD_AUDIO runtime permission has been granted. The
     * previous implementation was a stub that always returned true, letting
     * startRecording() proceed without the permission and fail later inside
     * AudioRecord.
     *
     * @return true when recording is permitted (always true before Android M,
     *         where permissions are granted at install time)
     */
    private boolean checkPermissions() {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
            return getApplication().checkSelfPermission(Manifest.permission.RECORD_AUDIO)
                == PackageManager.PERMISSION_GRANTED;
        }
        return true;
    }

    /**
     * Begins a capture session: verifies permissions, initializes the recorder
     * and WebSocket, resets per-session state, and spawns a reader thread that
     * streams PCM frames to the server. The reader now holds a local reference
     * to the recorder (stopRecording() nulls the field from another thread —
     * previously an NPE race) and exits on read errors instead of spinning.
     */
    public void startRecording() {
        if (isRecordingFlag) return;

        android.util.Log.d(TAG, "开始录音");

        if (!checkPermissions()) {
            error.setValue("缺少必要权限");
            return;
        }

        initAudioRecord(MediaRecorder.AudioSource.VOICE_COMMUNICATION);
        if (audioRecord == null || audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
            error.setValue("录音初始化失败");
            return;
        }

        // Reset per-session state before the connection opens.
        isRecording.setValue(true);
        isRecordingFlag = true;
        isStartFrameSent = false;
        isEndFrameSent = false;
        isWebSocketReady = false;
        synchronized (audioLock) {
            pendingAudioDataList.clear();
        }
        voiceText.postValue("");
        translatedText.postValue("");
        error.postValue("");

        initWebSocket();

        audioRecord.startRecording();
        // Local reference: the field may be nulled by stopRecording() while the
        // reader thread is mid-loop.
        final AudioRecord recorder = audioRecord;
        new Thread(() -> {
            byte[] buffer = new byte[BUFFER_SIZE];
            while (isRecordingFlag) {
                int read;
                try {
                    read = recorder.read(buffer, 0, buffer.length);
                } catch (IllegalStateException e) {
                    // Recorder released mid-read during shutdown — end the loop.
                    android.util.Log.w(AUDIO_TAG, "读取音频数据失败", e);
                    break;
                }
                if (read > 0) {
                    sendAudioFrame(buffer, read);
                } else if (read < 0) {
                    // ERROR / ERROR_INVALID_OPERATION etc. — stop instead of spinning.
                    android.util.Log.e(AUDIO_TAG, "AudioRecord.read返回错误: " + read);
                    break;
                }
            }
        }).start();
    }

    /**
     * Ends the capture session: stops the reader loop, releases the recorder,
     * sends the end frame, and closes the WebSocket. stop() is now guarded —
     * it throws IllegalStateException on a recorder that never started.
     */
    public void stopRecording() {
        if (!isRecordingFlag) return;

        android.util.Log.d(TAG, "停止录音");

        // Flip the flag first so the reader thread exits its loop.
        isRecordingFlag = false;
        isRecording.setValue(false);

        if (audioRecord != null) {
            try {
                if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
                    audioRecord.stop();
                }
            } catch (IllegalStateException e) {
                // stop() on an unstarted recorder throws; release regardless.
                android.util.Log.w(AUDIO_TAG, "停止录音失败", e);
            }
            audioRecord.release();
            audioRecord = null;
        }

        sendEndFrame();

        if (webSocket != null) {
            webSocket.close(1000, "正常关闭");
        }
    }

    /**
     * Translates {@code text} between the currently selected languages on a
     * background thread, publishing the result (or an error message) and the
     * loading state via LiveData.
     */
    public void translateText(String text) {
        isLoading.setValue(true);
        Runnable job = () -> {
            try {
                String result = translationRepository.translate(
                    text, getCurrentSourceLangCode(), getCurrentTargetLangCode());
                translatedText.postValue(result);
            } catch (Exception e) {
                error.postValue("翻译失败: " + e.getMessage());
            } finally {
                isLoading.postValue(false);
            }
        };
        new Thread(job).start();
    }

    /**
     * Speaks the current translation via TTS. If the first attempt fails, the
     * engine is re-created and the utterance retried once. Fixes a leak: the
     * broken engine was previously replaced without {@code shutdown()},
     * abandoning its native resources.
     */
    public void playTranslation() {
        String text = translatedText.getValue();
        if (tts == null || text == null) return;

        int result = tts.speak(text, TextToSpeech.QUEUE_FLUSH, null, null);
        if (result == TextToSpeech.ERROR) {
            // Release the broken engine before replacing it.
            tts.shutdown();
            tts = new TextToSpeech(getApplication(), status -> {
                if (status != TextToSpeech.ERROR) {
                    tts.setLanguage(Locale.CHINESE);
                    String retryText = translatedText.getValue();
                    if (retryText != null) {
                        tts.speak(retryText, TextToSpeech.QUEUE_FLUSH, null, null);
                    }
                }
            });
        }
    }

    // Copies the current translation (if any) to the system clipboard and
    // confirms with a toast.
    public void copyTranslation() {
        final String text = translatedText.getValue();
        if (text == null || text.isEmpty()) {
            return;
        }
        ClipboardManager clipboard =
            (ClipboardManager) getApplication().getSystemService(Context.CLIPBOARD_SERVICE);
        clipboard.setPrimaryClip(ClipData.newPlainText("翻译结果", text));
        Toast.makeText(getApplication(), "已复制", Toast.LENGTH_SHORT).show();
    }

    // Resolves the selected source language to its API code; defaults to the
    // first entry (zh) when nothing has been selected yet.
    private String getCurrentSourceLangCode() {
        Integer selected = sourceLangIndex.getValue();
        return languages.get(selected == null ? 0 : selected).getCode();
    }
    // Resolves the selected target language to its API code; defaults to the
    // second entry (en) when nothing has been selected yet.
    private String getCurrentTargetLangCode() {
        Integer selected = targetLangIndex.getValue();
        return languages.get(selected == null ? 1 : selected).getCode();
    }

    /**
     * Releases all resources when the ViewModel is destroyed: broadcast
     * receiver, recorder, TTS engine, telephony listener, buffered audio,
     * and the WebSocket. unregisterReceiver() is now guarded — registration is
     * not visible in this file, and unregistering a receiver that was never
     * registered throws IllegalArgumentException.
     */
    @Override
    protected void onCleared() {
        super.onCleared();
        isRecordingFlag = false;
        try {
            getApplication().unregisterReceiver(audioDataReceiver);
        } catch (IllegalArgumentException e) {
            // Receiver was never registered — harmless during teardown.
            Log.w(TAG, "注销广播接收器失败", e);
        }
        if (audioRecord != null) {
            audioRecord.release();
            audioRecord = null;
        }
        if (tts != null) {
            tts.stop();
            tts.shutdown();
        }
        if (telephonyManager != null && phoneStateListener != null) {
            telephonyManager.listen(phoneStateListener, android.telephony.PhoneStateListener.LISTEN_NONE);
            phoneStateListener = null;
        }
        synchronized (audioLock) {
            pendingAudioDataList.clear();
        }
        if (webSocket != null) {
            webSocket.close(1000, "正常关闭");
        }
    }
} 