package com.example.loginpage

import android.annotation.SuppressLint
import android.app.Activity
import android.content.Context
import android.content.Intent
import android.content.SharedPreferences
import android.content.pm.PackageManager
import android.media.MediaRecorder
import android.net.Uri
import android.os.Bundle
import android.speech.tts.TextToSpeech
import android.speech.tts.UtteranceProgressListener
import android.text.TextUtils
import android.view.Gravity
import android.view.LayoutInflater
import android.view.MotionEvent
import android.view.View
import android.view.ViewGroup
import android.widget.PopupMenu
import android.widget.TextView
import android.widget.Toast
import androidx.activity.result.ActivityResultLauncher
import androidx.activity.result.contract.ActivityResultContracts
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import androidx.fragment.app.Fragment
import androidx.lifecycle.MutableLiveData
import androidx.lifecycle.ViewModelProvider
import com.example.loginpage.ChatAPI.ChatViewModel
import com.example.loginpage.CloudSQL.MySQLConnection
import com.example.loginpage.InputUtil.AudioViewModel
import com.example.loginpage.InputUtil.BitmapStorage
import com.example.loginpage.InputUtil.DataViewModel
import com.example.loginpage.InputUtil.FileProvider
import com.example.loginpage.InputUtil.OCRViewModel
import com.example.loginpage.databinding.FragmentChatBinding
import com.google.gson.Gson
import com.google.gson.reflect.TypeToken
import java.io.ByteArrayOutputStream
import java.io.File
import java.io.FileInputStream
import java.io.FileNotFoundException
import java.io.IOException
import java.io.InputStream
import java.util.Base64
import java.util.Locale
import kotlin.properties.Delegates

class ChatFragment : Fragment(){
    // View binding for fragment_chat.xml.
    private lateinit var binding: FragmentChatBinding
    // Activity-scoped view models shared across fragments: LLM chat, speech
    // recognition, OCR, cross-fragment UI state, and the DB connection.
    private lateinit var chatViewModel: ChatViewModel
    private lateinit var audioViewModel: AudioViewModel
    private lateinit var ocrViewModel: OCRViewModel
    private lateinit var dataViewModel: DataViewModel
    private lateinit var mySQL: MySQLConnection

    // Result launchers for the camera and the system gallery picker.
    private lateinit var cameraLauncher: ActivityResultLauncher<Uri>
    private lateinit var imagePickerLauncher: ActivityResultLauncher<Intent>
    // Target uri handed to the camera app; assigned just before launching it.
    private lateinit var finalUri:Uri
    // Most recent photo uri (camera or gallery); reset to null once consumed.
    private lateinit var imageUri:MutableLiveData<Uri?>

    // Created lazily in startRecording(); only valid while isRecording is true.
    private lateinit var mediaRecorder: MediaRecorder

    private lateinit var sharedPreferences: SharedPreferences

    // One-shot guard so the previous Q&A history is only re-inflated once.
    private var isAllTheseAsked = false
    private var isRecording = false
    private var audioBase64 = ""
    private var ocrBase64 = ""
    private var tts: TextToSpeech? = null

    // Screenshot bookkeeping persisted in SharedPreferences.
    private var storeNum by Delegates.notNull<Int>()
    private lateinit var store:MutableList<String>
    private lateinit var storeString: String

    // Display names shown next to each chat bubble (model name / user label).
    private var chatType = "文心一言"
    private var userType = "用户"

    /**
     * Builds the chat UI: restores previous Q&A, refreshes cloud tokens, and
     * wires up all button/observer interactions (text, voice, camera, gallery,
     * TTS playback and screenshot saving).
     */
    @SuppressLint("InflateParams", "ClickableViewAccessibility", "ResourceType", "RtlHardcoded")
    override fun onCreateView(
        inflater: LayoutInflater, container: ViewGroup?,
        savedInstanceState: Bundle?
    ): View {
        binding = FragmentChatBinding.inflate(layoutInflater,container, false)
        chatViewModel = ViewModelProvider(requireActivity())[ChatViewModel::class.java]
        audioViewModel = ViewModelProvider(requireActivity())[AudioViewModel::class.java]
        ocrViewModel = ViewModelProvider(requireActivity())[OCRViewModel::class.java]
        dataViewModel = ViewModelProvider(requireActivity())[DataViewModel::class.java]
        mySQL = ViewModelProvider(requireActivity())[MySQLConnection::class.java]

        sharedPreferences = requireActivity().getSharedPreferences(AccountFragment.PREFS_NAME, Context.MODE_PRIVATE)
        storeNum = sharedPreferences.getInt(AccountFragment.STORE_NUM, 0)
        storeString = sharedPreferences.getString(AccountFragment.STORE_STRING, "[]").toString()
        store = Gson().fromJson(storeString, object : TypeToken<List<String>>(){}.type)
        imageUri =MutableLiveData<Uri?>(null)

        // Restore the saved question/answer transcript after a view switch.
        // NOTE(review): this relies on observe() dispatching the current value
        // synchronously (activity already RESUMED) because isAllTheseAsked is
        // forced to true right after registration — confirm before changing
        // the lifecycle owner here.
        dataViewModel.userAsking.observe(requireActivity()){
            if(!isAllTheseAsked){
                isAllTheseAsked = true
                val iterator = dataViewModel.llmAnswer.value?.iterator()
                for (ask in it){
                    // Re-inflate the user's question bubble.
                    val userField = layoutInflater.inflate(R.layout.user_text_field,null) as ViewGroup
                    val userText = userField.findViewById<TextView>(R.id.userText)
                    val userName = userField.findViewById<TextView>(R.id.userName)
                    userName.text = userType
                    userText.text = ask
                    binding.allField.addView(userField)
                    if (iterator != null) {
                        if(iterator.hasNext()){
                            val answer = iterator.next()
                            // Re-inflate the paired LLM answer bubble, if any.
                            val llmField = layoutInflater.inflate(R.layout.llm_text_field, null) as ViewGroup
                            val llmText = llmField.findViewById<TextView>(R.id.llmText)
                            val llmName = llmField.findViewById<TextView>(R.id.llmName)
                            llmName.text = chatType
                            llmText.text = answer
                            binding.allField.addView(llmField)
                        }
                    }
                }
            }
        }
        isAllTheseAsked = true
        // Refresh the LLM access token.
        chatViewModel.getAccess()
        // Refresh the Huawei Cloud speech-recognition token.
        audioViewModel.getIAM(AK, SK)
        // Refresh the Huawei Cloud OCR token.
        ocrViewModel.getIAM(AK,SK)

        // Save the current transcript as a screenshot image.
        binding.btnStore.setOnClickListener {
            context?.let { it1 ->
                // Renamed for accuracy: these hold "permission DENIED" states.
                val writeDenied = ContextCompat.checkSelfPermission(it1, "android.permission.WRITE_EXTERNAL_STORAGE") != PackageManager.PERMISSION_GRANTED
                val readDenied = ContextCompat.checkSelfPermission(it1, "android.permission.READ_MEDIA_IMAGES") != PackageManager.PERMISSION_GRANTED
                if (writeDenied && readDenied) {
                    // Neither permission granted: ask for both.
                    // (Bug fix: the same Toast used to be shown twice in a row.)
                    Toast.makeText(requireContext(), "请授予有关权限", Toast.LENGTH_SHORT).show()
                    val permissions = arrayOf("android.permission.READ_MEDIA_IMAGES", "android.permission.WRITE_EXTERNAL_STORAGE")
                    activity?.let { it2 -> ActivityCompat.requestPermissions(it2, permissions, PERMISSION_REQUEST_CODE) }
                } else {
                    // At least one permission granted: try file names
                    // myImage<N>.png, advancing N, for at most 50 attempts.
                    var isSaved = false
                    val storeNumCopy = storeNum
                    while (!isSaved && storeNum - storeNumCopy < 50){
                        isSaved = BitmapStorage.getPicture(requireActivity(), binding.scrollView,"myImage${storeNum}.png")

                        if (isSaved){
                            store.add("myImage${storeNum}.png")
                            storeString = Gson().toJson(store)
                            sharedPreferences.edit()
                                .putInt(AccountFragment.STORE_NUM, ++storeNum)
                                .putString(AccountFragment.STORE_STRING, storeString)
                                .apply()
                            Toast.makeText(requireContext(), "图片保存成功", Toast.LENGTH_SHORT).show()
                        } else{
                            storeNum++
                            Toast.makeText(requireContext(), "图片保存失败", Toast.LENGTH_SHORT).show()
                        }
                        sharedPreferences.edit()
                            .putInt(AccountFragment.STORE_NUM, storeNum)
                            .putString(AccountFragment.STORE_STRING, storeString)
                            .apply()
                    }
                }
            }
        }
        // Initialize TTS with Chinese as the spoken language.
        val ttsListener = TextToSpeech.OnInitListener { status ->
            if (status == TextToSpeech.SUCCESS) {
                val result = tts?.setLanguage(Locale.CHINA)
                if (result == TextToSpeech.LANG_MISSING_DATA || result == TextToSpeech.LANG_NOT_SUPPORTED) {
                    println("Language not supported")
                }
            } else {
                println("Initialization failed")
            }
        }
        tts = TextToSpeech(requireContext(), ttsListener)

        // Camera result: publish the captured photo's uri.
        cameraLauncher = registerForActivityResult(ActivityResultContracts.TakePicture()) { success ->
            if (success) {
                isAllTheseAsked = true
                imageUri.postValue(finalUri)
            }
        }
        // Gallery result: publish the selected photo's uri.
        imagePickerLauncher = registerForActivityResult(ActivityResultContracts.StartActivityForResult()) { result ->
            if (result.resultCode == Activity.RESULT_OK) {
                isAllTheseAsked = true
                result.data?.data?.let { uri ->
                    imageUri.postValue(uri)
                }
            }
        }

        // Submit button: send the typed question to the LLM.
        binding.btnSubmit.setOnClickListener {
            val question:String = binding.inputText.text.toString()
            if(TextUtils.isEmpty(question)){
                Toast.makeText(requireContext(), "输入不能为空", Toast.LENGTH_SHORT).show()
            } else{
                // Fire the request.
                sendChatRequest(question)
                // Record the question so the transcript survives view switches.
                chatViewModel.allAsking.add(question)
                dataViewModel.setUserAsking(chatViewModel.allAsking)
                // Append the user's bubble and clear the input box.
                val userField = layoutInflater.inflate(R.layout.user_text_field,null) as ViewGroup
                val userText = userField.findViewById<TextView>(R.id.userText)
                val userName = userField.findViewById<TextView>(R.id.userName)
                userName.text = userType
                userText.text = binding.inputText.text
                binding.inputText.text.clear()
                binding.allField.addView(userField)
            }
        }
        // Voice button: toggle between keyboard input and "hold to talk".
        binding.btnVoice.setOnClickListener {

            if(binding.inputText.isFocusable){
                // Switch the input box into "hold to talk" mode.
                binding.inputText.setText("按住说话")
                binding.inputText.setGravity(Gravity.CENTER)
                binding.inputText.isFocusable = false
                binding.btnSubmit.setEnabled(false)
            } else{
                // Switch back to plain text entry.
                binding.inputText.isFocusable = true
                binding.btnSubmit.setEnabled(true)
                binding.inputText.setGravity(Gravity.LEFT)
                binding.inputText.text.clear()
            }
        }
        // Long-press on "hold to talk": start recording.
        binding.inputText.setOnLongClickListener {
            if (!binding.inputText.isFocusable) {
                startRecording()
            }
            true // Long press consumed.
        }
        // Finger lifted: stop recording and send the audio for recognition.
        binding.inputText.setOnTouchListener { _, event ->
            if (event.action == MotionEvent.ACTION_UP) {
                if (!binding.inputText.isFocusable && isRecording) {
                    stopRecording()
                    audioBase64 = getRecordingBase64(
                        File(
                            requireContext().externalCacheDir,
                            "recorded_audio.amr"
                        )
                    ).toString()
                    audioViewModel.performSISRequest(audioBase64)
                }
            }
            false // Not consumed, so click events still propagate.
        }
        // "More" button: popup with camera / gallery options.
        binding.btnMore.setOnClickListener {
            val popupMenu = PopupMenu(requireContext(), it)
            popupMenu.menuInflater.inflate(R.menu.popup_menu, popupMenu.menu)
            popupMenu.setOnMenuItemClickListener { item ->
                when (item.itemId) {
                    R.id.action_option1 -> {
                        takePhoto()
                        true
                    }
                    R.id.action_option2 -> {
                        pickImageFromGallery()
                        true
                    }
                    else -> false
                }
            }
            popupMenu.show()
        }
        // TTS progress listener: shut the engine down after each utterance.
        tts?.setOnUtteranceProgressListener(object : UtteranceProgressListener() {
            override fun onStart(utteranceId: String?) {
                // No-op.
            }
            override fun onDone(utteranceId: String?) {
                // Release the TTS engine once playback finishes.
                tts?.shutdown()
            }
            @Deprecated("Deprecated in Java")
            override fun onError(utteranceId: String?) {
                // No-op.
            }
        })
        // LLM answer observer: null means timeout, "null" means "nothing new".
        chatViewModel.lastResponse.observe(requireActivity()){

            if(it == null){
                val llmField = layoutInflater.inflate(R.layout.llm_text_field, null) as ViewGroup
                val llmText = llmField.findViewById<TextView>(R.id.llmText)
                val llmName = llmField.findViewById<TextView>(R.id.llmName)
                llmName.text = chatType
                llmText.text = "连接超时，请重新提问！"
                chatViewModel.allAnswer.add(llmText.text.toString())
                dataViewModel.setLLMAnswer(chatViewModel.allAnswer)
                binding.allField.addView(llmField)
                chatViewModel.clearResponse()
                // Speech must come last.
                if(dataViewModel.isTTS.value == true){
                    tts?.speak("抱歉，没能回答您的问题，您可以重新提问", TextToSpeech.QUEUE_FLUSH, null, null)
                }
            } else if(it != "null"){
                val llmField = layoutInflater.inflate(R.layout.llm_text_field, null) as ViewGroup
                val llmText = llmField.findViewById<TextView>(R.id.llmText)
                val llmName = llmField.findViewById<TextView>(R.id.llmName)
                llmName.text = chatType
                llmText.text = it
                chatViewModel.allAnswer.add(it)
                dataViewModel.setLLMAnswer(chatViewModel.allAnswer)
                binding.allField.addView(llmField)
                chatViewModel.clearResponse()
                // Speech must come last.
                if(dataViewModel.isTTS.value == true){
                    tts?.speak(llmText.text, TextToSpeech.QUEUE_FLUSH, null, null)
                }
            }

        }
        // Speech-recognition result: treat it as a typed question.
        audioViewModel.myWords.observe(requireActivity()){
            if(TextUtils.isEmpty(it)){
                Toast.makeText(requireContext(), "未检测到用户说话", Toast.LENGTH_SHORT).show()
            } else if(it != "null"){
                sendChatRequest(it)
                chatViewModel.allAsking.add(it)
                dataViewModel.setUserAsking(chatViewModel.allAsking)
                val userField = layoutInflater.inflate(R.layout.user_text_field,null) as ViewGroup
                val userText = userField.findViewById<TextView>(R.id.userText)
                val userName = userField.findViewById<TextView>(R.id.userName)
                userName.text = userType
                userText.text = it
                binding.allField.addView(userField)
                audioViewModel.clearMyWords()
            }
        }
        // Photo uri observer: run OCR on any newly returned image.
        imageUri.observe(requireActivity()) {
            if (it != null) {
                ocrBase64 = getImageBase64(requireContext(),it).toString()
                ocrViewModel.getOCRRequest(ocrBase64)
                imageUri.postValue(null)
            }
        }
        // OCR result: treat recognized text as a typed question.
        ocrViewModel.myWords.observe(requireActivity()){
            if(TextUtils.isEmpty(it)){
                Toast.makeText(requireContext(), "图片中没有文字", Toast.LENGTH_SHORT).show()
            } else if (it != "null"){
                sendChatRequest(it)
                chatViewModel.allAsking.add(it)
                dataViewModel.setUserAsking(chatViewModel.allAsking)
                val userField = layoutInflater.inflate(R.layout.user_text_field,null) as ViewGroup
                val userText = userField.findViewById<TextView>(R.id.userText)
                val userName = userField.findViewById<TextView>(R.id.userName)
                userName.text = userType
                userText.text = it
                binding.allField.addView(userField)
                ocrViewModel.clearMyWords()
            }
        }

        return binding.root
    }

    /** Releases the TTS engine and, if still recording, the MediaRecorder. */
    override fun onDestroy() {
        super.onDestroy()
        if (tts != null) {
            tts?.stop()
            tts?.shutdown()
        }
        // Bug fix: release the recorder if the user left mid-recording,
        // otherwise the MediaRecorder (and the microphone) leaks.
        if (isRecording) {
            stopRecording()
        }
    }

    /**
     * Dispatches [question] to the backend model matching the currently
     * selected chat type; unknown types fall back to 文心一言 (ERNIE).
     */
    private fun sendChatRequest(question: String){
        // Bug fix: the original registered a brand-new LiveData observer on
        // every call, accumulating observers for the activity's lifetime.
        // Reading the current value gives the same synchronous behavior.
        dataViewModel.chatType.value?.let { chatType = it }
        when(chatType){
            "智谱清言" -> chatViewModel.sendRequest(question,"chatglm2_6b_32k")
            "轩辕大模型" -> chatViewModel.sendRequest(question,"xuanyuan_70b_chat")
            "Gemma" -> chatViewModel.sendRequest(question,"gemma_7b_it")
            "LLaMa" -> chatViewModel.sendRequest(question,"llama_3_70b")
            else -> {
                chatType = "文心一言"
                chatViewModel.sendRequest(question,"ernie-4.0-8k-0329")
            }
        }
    }
    // Huawei Cloud credentials.
    // SECURITY: these are live credentials committed to source. They should be
    // revoked and moved to secure configuration (e.g. local.properties /
    // NDK-protected storage), not shipped inside the APK.
    companion object {
        private const val AK = "R75LHVQPR35QP6054J57"
        private const val SK = "3qBDSkDTRyQtuQGKrqdl2PWWa7ex4y7tmmQzj9Sb"
        const val PERMISSION_REQUEST_CODE = 128431782
    }
    /**
     * Starts recording microphone audio to externalCacheDir/recorded_audio.amr
     * (AMR-NB). Requests RECORD_AUDIO first if it has not been granted.
     */
    private fun startRecording() {
        if (context?.let { it1 -> ContextCompat.checkSelfPermission(it1, "android.permission.RECORD_AUDIO") }
            != PackageManager.PERMISSION_GRANTED) {
            Toast.makeText(requireContext(), "请授予有关权限", Toast.LENGTH_SHORT).show()
            activity?.let { it1 ->
                ActivityCompat.requestPermissions(it1, arrayOf("android.permission.RECORD_AUDIO"),
                    PERMISSION_REQUEST_CODE
                )
            }
        } else{
            isRecording = true
            // NOTE(review): MediaRecorder(Context) requires API 31+ — confirm
            // minSdk, otherwise use the deprecated no-arg constructor.
            mediaRecorder = MediaRecorder(requireContext())
            mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC)
            mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.AMR_NB)
            mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB)
            val fileName = "recorded_audio.amr"
            val file = File(requireContext().externalCacheDir, fileName)
            mediaRecorder.setOutputFile(file.absolutePath)
            try {
                mediaRecorder.prepare()
                mediaRecorder.start()
            } catch (e: IOException) {
                e.printStackTrace()
            }
        }
    }
    /** Stops and releases the recorder if a recording is in progress. */
    private fun stopRecording() {
        if(isRecording){
            isRecording = false
            mediaRecorder.stop()
            mediaRecorder.release()
        }
    }
    /**
     * Launches the camera to capture a photo into a FileProvider uri.
     * Requests CAMERA first if it has not been granted.
     */
    private fun takePhoto() {
        if (context?.let { it1 -> ContextCompat.checkSelfPermission(it1, "android.permission.CAMERA") }
            != PackageManager.PERMISSION_GRANTED) {
            Toast.makeText(requireContext(), "请授予有关权限", Toast.LENGTH_SHORT).show()
            activity?.let { it1 ->
                ActivityCompat.requestPermissions(it1, arrayOf("android.permission.CAMERA"),
                    PERMISSION_REQUEST_CODE
                )
            }
        } else{
            val ctx = requireActivity()
            finalUri = FileProvider.getImageUri(ctx)
            cameraLauncher.launch(finalUri)
        }
    }
    /**
     * Opens the system gallery picker for an image, requesting the relevant
     * read permissions first when neither has been granted.
     */
    private fun pickImageFromGallery() {
        context?.let { it1 ->
            // Renamed for accuracy: these hold "permission DENIED" states.
            val readExternalDenied = ContextCompat.checkSelfPermission(it1, "android.permission.READ_EXTERNAL_STORAGE") != PackageManager.PERMISSION_GRANTED
            val readMediaDenied = ContextCompat.checkSelfPermission(it1, "android.permission.READ_MEDIA_IMAGES") != PackageManager.PERMISSION_GRANTED
            if (readExternalDenied && readMediaDenied) {
                // Neither permission granted: ask for both.
                // (Bug fix: the same Toast used to be shown twice in a row.)
                Toast.makeText(requireContext(), "请授予有关权限", Toast.LENGTH_SHORT).show()
                val permissions = arrayOf("android.permission.READ_MEDIA_IMAGES", "android.permission.READ_EXTERNAL_STORAGE")
                activity?.let { it2 -> ActivityCompat.requestPermissions(it2, permissions,
                    PERMISSION_REQUEST_CODE
                ) }
            } else{
                val intent = Intent(Intent.ACTION_PICK).apply {
                    type = "image/*"
                }
                imagePickerLauncher.launch(intent)
            }
        }
    }
}
/**
 * Reads [recordingFile] and returns its contents as a Base64 string.
 *
 * Returns null when the file does not exist or cannot be read; an empty file
 * yields the empty string. Replaces the original hand-rolled buffer loop with
 * [File.readBytes], which closes its stream even on failure (the original's
 * `finally { close() }` could itself throw and escape the function).
 */
fun getRecordingBase64(recordingFile: File): String? {
    return try {
        // readBytes() throws FileNotFoundException (an IOException) for a
        // missing file, so both original null paths collapse into one catch.
        Base64.getEncoder().encodeToString(recordingFile.readBytes())
    } catch (e: IOException) {
        null
    }
}

/**
 * Resolves [imageUri] through the ContentResolver and returns the image bytes
 * as a Base64 string, or null on failure.
 *
 * Fixes vs. the original: the input stream is closed via [use] (the original's
 * `finally { close() }` could throw and escape), and an unresolvable uri
 * (openInputStream returning null) now yields null instead of silently
 * producing "" — callers already treat null as the failure value.
 */
fun getImageBase64(context: Context, imageUri: Uri): String? {
    return try {
        context.contentResolver.openInputStream(imageUri)?.use { input ->
            Base64.getEncoder().encodeToString(input.readBytes())
        }
    } catch (e: IOException) {
        e.printStackTrace()
        null
    }
}

