<script setup lang="ts">
import { computed, ref, watch } from 'vue'
import {
  NAutoComplete,
  NButton,
  NDropdown,
  NImage,
  NInput,
  NModal,
  NPopover,
  NTag,
  NTooltip,
  useMessage,
} from 'naive-ui'
import type { AutoCompleteOptions } from 'naive-ui/es/auto-complete/src/interface'
import type { RenderLabel } from 'naive-ui/es/_internal/select-menu/src/interface'
import { useRoute } from 'vue-router'
import AiMic from './aiMic.vue'
import { useBasicLayout } from '@/hooks/useBasicLayout'
import { t } from '@/locales'

import { PromptStore, SvgIcon } from '@/components/common'
import {
  GptUploader,
  Recognition,
  canVisionModel,
  chatSetting,
  checkDisableGpt4,
  countTokens,
  getFileFromClipboard,
  isFileMp3,
  mlog,
  upImg,
} from '@/api'
import { gptConfigStore, homeStore, useChatStore } from '@/store'
import aiModel from '@/views/mj/aiModel.vue'
import { useIconRender } from '@/hooks/useIconRender'

// Props: v-model text value plus optional autocomplete wiring from the parent.
const props = defineProps<{
  modelValue: string // v-model: current draft text
  disabled?: boolean // disables the mobile send button
  searchOptions?: AutoCompleteOptions // suggestions for the NAutoComplete
  renderOption?: RenderLabel // custom renderer for suggestion labels
}>()

// Events re-emitted to the parent (v-model update plus toolbar actions).
const emit = defineEmits([
  'update:modelValue',
  'update:chatType',
  'export',
  'handleClear',
])

const { iconRender } = useIconRender()

const route = useRoute()
const chatStore = useChatStore()
// Ref to the hidden <input type="file"> so toolbar buttons can open the picker.
const fsRef = ref()
// Local UI state bag for the input area.
const st = ref<{
  fileBase64: string[] // uploaded attachment URLs shown in the preview strip
  isLoad: number // 1 while an upload is in flight, 0 otherwise
  isShow: boolean // model-settings modal visibility
  showMic: boolean // mic-recording view visibility
  micStart: boolean // true while ASR/recording is capturing
  chatType: boolean // toggles the chatType flag sent with the prompt
}>({
  fileBase64: [],
  isLoad: 0,
  isShow: false,
  showMic: false,
  micStart: false,
  chatType: false,
})
const { isMobile } = useBasicLayout()
// Placeholder text differs between mobile and desktop layouts.
const placeholder = computed(() => {
  if (isMobile.value)
    return t('chat.placeholderMobile')
  return t('chat.placeholder')
})

const { uuid } = route.params as { uuid: string }
const uuid1 = chatStore.active
// NOTE(review): 1002 appears to be a fallback session id when no chat is
// active (also used in countTokens below) — confirm against chatSetting.
const chatSet = new chatSetting(uuid1 == null ? 1002 : uuid1)
// Snapshot of the per-chat GPT config, refreshed by the watchers below.
const nGptStore = ref(chatSet.getGptConfig())
const dataSources = computed(() => chatStore.getChatByUuid(+uuid))

// Keep the local GPT-config snapshot in sync with the settings store.
watch(
  () => gptConfigStore.myData,
  () => (nGptStore.value = chatSet.getGptConfig()),
  { deep: true },
)
// Refresh the snapshot when a chat save is broadcast through the home store.
watch(
  () => homeStore.myData.act,
  n => n == 'saveChat' && (nGptStore.value = chatSet.getGptConfig()),
  { deep: true },
)
/**
 * Dispatch the current draft (plus attachments) through the home store.
 * Bails out on empty input, on a disabled GPT-4 model, or while a previous
 * request is still loading; on success clears the draft and attachments.
 */
const handleSubmit = () => {
  const draft = mvalue.value
  if (draft == '')
    return
  if (checkDisableGpt4(gptConfigStore.myData.model)) {
    ms.error(t('mj.disableGpt4'))
    return false
  }
  if (homeStore.myData.isLoader)
    return

  const gpts = gptConfigStore.myData.gpts
  const payload = {
    prompt: draft,
    fileBase64: st.value.fileBase64,
    chatType: st.value.chatType ? 1 : 0,
    appId: gpts ? gpts.id : '',
  }
  homeStore.setMyData({ act: 'gpt.submit', actData: payload })
  // Reset the draft and the attachment strip for the next message.
  mvalue.value = ''
  st.value.fileBase64 = []
  return false
}
const ms = useMessage()
// Two-way proxy for the v-model text (modelValue prop <-> update event).
const mvalue = computed({
  get() {
    return props.modelValue
  },
  set(value) {
    emit('update:modelValue', value)
  },
})

/**
 * Change handler for the hidden file input: forwards the chosen file to upFile.
 * Guards against a cancelled picker (no file selected), which previously
 * passed `undefined` to upFile and crashed on `file.name`.
 */
function selectFile(input: any) {
  const file = input?.target?.files?.[0]
  if (!file)
    return // picker cancelled — nothing to upload
  upFile(file)
}

// Remaining-token counter shown in the mobile badge.
const myToken = ref({ remain: 0, modelTokens: '4k' })
// Recompute token usage for the current conversation plus the draft text.
const funt = async () => {
  const d = await countTokens(
    dataSources.value,
    mvalue.value,
    chatStore.active ?? 1002,
  )
  myToken.value = d
  return d
}
// Recount whenever the draft, history, model config or loading state changes.
watch(() => mvalue.value, funt)
watch(() => dataSources.value, funt)
watch(() => gptConfigStore.myData, funt, { deep: true })
watch(() => homeStore.myData.isLoader, funt, { deep: true })
funt()

// Route an incoming file to the right upload path depending on model support.
const upFile = (file: any) => {
  if (!canVisionModel(gptConfigStore.myData.model)) {
    // Non-vision model: mp3 is sent to whisper transcription, anything else
    // is treated as an image and uploaded via upImg.
    if (isFileMp3(file.name)) {
      mlog('mp3', file)

      homeStore.setMyData({
        act: 'gpt.whisper',
        actData: { file, prompt: 'whisper' },
      })
    }
    else {
      upImg(file)
        .then((uploadResult) => {
          fsRef.value.value = ''
          // Only keep the URL part of the upload result
          const imageUrl = uploadResult.url
          // Reject a URL that has already been uploaded
          if (st.value.fileBase64.findIndex(v => v === imageUrl) > -1) {
            ms.error(t('mj.noReUpload')) // "duplicate upload not allowed"
            return
          }
          // Append the image URL to the attachment list
          st.value.fileBase64.push(imageUrl)
        })
        .catch(e => ms.error(e))
    }
  }
  else {
    // Vision-capable model: upload any file type through the chat upload API.
    const formData = new FormData()
    // const file = input.target.files[0];
    formData.append('file', file)
    ms.info(t('mj.uploading'))
    st.value.isLoad = 1
    GptUploader('/chat/upload', formData)
      .then((r) => {
        // mlog('上传成功', r);
        st.value.isLoad = 0
        if (r.url) {
          ms.info(t('mj.uploadSuccess'))
          // Relative URLs are resolved against the current origin.
          if (r.url.includes('http'))
            st.value.fileBase64.push(r.url)
          else
            st.value.fileBase64.push(location.origin + r.url)
        }
        else if (r.error) { ms.error(r.error) }
      })
      .catch((e) => {
        st.value.isLoad = 0
        ms.error(t('mj.uploadFail') + (e.message ?? JSON.stringify(e)))
      })
  }
}

/**
 * Keyboard shortcut for sending: plain Enter on desktop (Shift+Enter inserts
 * a newline), Ctrl+Enter on mobile.
 */
function handleEnter(event: KeyboardEvent) {
  if (event.key !== 'Enter')
    return
  const shouldSend = isMobile.value ? event.ctrlKey : !event.shiftKey
  if (shouldSend) {
    event.preventDefault()
    handleSubmit()
  }
}

// File-picker accept filter: vision models take anything; others are limited
// to images plus common audio/video formats.
const acceptData = computed(() =>
  canVisionModel(gptConfigStore.myData.model)
    ? '*/*'
    : 'image/jpeg, image/jpg, image/png, image/gif, .mp3, .mp4, .mpeg, .mpga, .m4a, .wav, .webm',
)

const drop = (e: DragEvent) => {
  e.preventDefault()
  e.stopPropagation()
  if (!e.dataTransfer || e.dataTransfer.files.length == 0)
    return
  const files = e.dataTransfer.files
  const file = files[0]

  upFile(file)
  // mlog('drop', files);
}
// Clipboard upload (e.g. pasted screenshots): first file on the clipboard wins.
const paste = (e: ClipboardEvent) => {
  const clipboardFiles = getFileFromClipboard(e)
  if (clipboardFiles.length > 0)
    upFile(clipboardFiles[0])
}
// Called when the mic view finishes a recording: wrap the audio blob in a
// File and hand it to the whisper pipeline via the home store.
const sendMic = (e: any) => {
  mlog('sendMic', e)
  st.value.showMic = false
  // Fixed filename; duration-based naming was deliberately disabled upstream.
  const du = 'whisper.wav'
  const wavFile = new File([e.blob], du, { type: 'audio/wav' })
  homeStore.setMyData({
    act: 'gpt.whisper',
    actData: { file: wavFile, prompt: 'whisper', duration: e.stat?.duration },
  })
}

// Browser-side speech recognition via the project's Recognition wrapper.
// Interim results overwrite the input; when recognition ends, the final text
// is appended to whatever was typed before it started.
const goASR = () => {
  console.log('触发语音识别')

  const olod = mvalue.value // draft text before recognition started
  const rec = new Recognition()
  console.log('🚀 ~ goASR ~ rec:', rec)
  let rz = ''
  rec
    .setListener((r: string) => {
      // Interim result: show it directly in the input while capturing.
      rz = r
      mvalue.value = r
      console.log('mvalue.value1111', mvalue.value)
      st.value.micStart = true
    })
    .setOnEnd(() => {
      // Final result: original draft + full recognised text.
      mvalue.value = olod + rz
      console.log('mvalue.value', mvalue.value)

      ms.info(t('mj.micRecEnd'))
      st.value.micStart = false
    })
    .setOpt({
      timeOut: 3000,
      onStart: () => {
        ms.info(t('mj.micRec'))
        st.value.micStart = true
      },
    })
    .start()
}

// Entries for the microphone dropdown: OpenAI whisper vs in-browser ASR.
const drOption = [
  {
    label: t('mj.micWhisper'),
    key: 'whisper',
    icon: iconRender({ icon: 'ri:openai-fill' }),
  },
  {
    label: t('mj.micAsr'),
    icon: iconRender({ icon: 'ri:chrome-line' }),
    key: 'asr',
  },
]
/**
 * Dropdown select handler: start browser ASR or open the whisper mic view.
 * Uses strict equality (the keys are string literals) and `else if` since the
 * two keys are mutually exclusive.
 */
const handleSelectASR = (key: string | number) => {
  if (key === 'asr')
    goASR()
  else if (key === 'whisper')
    st.value.showMic = true
}
/**
 * Truncate a value to `maxLength` characters, appending "..." when cut.
 * @param inputStr value to display (coerced to string; falsy values yield '')
 * @param maxLength maximum number of characters kept before the ellipsis
 */
const truncateText = (inputStr: any, maxLength = 20) => {
  if (!inputStr)
    return ''
  const text = String(inputStr)
  if (text.length <= maxLength)
    return text
  return `${text.slice(0, maxLength)}...`
}

// Prompt-store modal visibility.
const show = ref(false)
// Forward toolbar actions to the parent component.
function handleExport() {
  emit('export')
}
function handleClear() {
  emit('handleClear')
}
</script>

<template>
  <!-- Mic-recording view: replaces the whole input area while recording -->
  <div v-if="st.showMic" class="myinputs flex justify-center items-center">
    <AiMic @cancel="st.showMic = false" @send="sendMic" />
  </div>
  <div v-else>
    <!-- Preview strip of uploaded attachments (URLs in st.fileBase64) -->
    <!-- NOTE(review): "items-base" looks like a typo for "items-baseline" — confirm intent -->
    <div
      v-if="st.fileBase64.length > 0"
      class="flex items-base justify-start pb-1 flex-wrap-reverse"
      style="margin: 0 40px"
    >
      <!-- :key added (Vue essential rule); URLs are treated as unique, which
           the indexOf-based splice below already assumes -->
      <div
        v-for="(v, ii) in st.fileBase64"
        :key="v"
        class="w-[60px] h-[60px] rounded-sm bg-slate-50 mr-1 mt-1 text-red-300 relative group"
      >
        <NImage :src="v" object-fit="cover" class="w-full h-full">
          <!-- While the image loads (or fails), show a download link instead -->
          <template #placeholder>
            <a
              class="w-full h-full flex items-center justify-center text-neutral-500"
              :href="v"
              target="_blank"
            >
              <SvgIcon icon="mdi:download" />{{ $t("mj.attr1") }} {{ ii + 1 }}
            </a>
          </template>
        </NImage>
        <!-- Hover-only remove button -->
        <SvgIcon
          icon="mdi:close"
          class="hidden group-hover:block absolute top-[-5px] right-[-5px] rounded-full bg-red-300 text-white cursor-pointer"
          @click="st.fileBase64.splice(st.fileBase64.indexOf(v), 1)"
        />
      </div>
    </div>

    <div
      class="myinputs"
      :class="[!isMobile ? 'chat-footer' : '']"
      @drop="drop"
      @paste="paste"
    >
      <!-- Hidden native picker; opened programmatically via fsRef.click() -->
      <input
        id="fileInput"
        ref="fsRef"
        type="file"
        class="hidden"
        :accept="acceptData"
        @change="selectFile"
      >
      <!-- Mobile: token-usage badge pinned above the input -->
      <div class="w-full relative">
        <div v-if="isMobile" class="absolute bottom-0 right-0 z-1">
          <NPopover trigger="hover">
            <template #trigger>
              <NTag
                type="info"
                round
                size="small"
                style="cursor: pointer"
                :bordered="false"
              >
                <div class="opacity-60 flex">
                  <SvgIcon icon="material-symbols:token-outline" />
                  {{ $t("mj.remain") }}{{ myToken.remain }}/{{
                    myToken.modelTokens
                  }}
                </div>
              </NTag>
            </template>
            <!-- Popover body: token explanation + shortcut to model settings -->
            <div class="w-[300px]">
              {{ $t("mj.tokenInfo1") }}
              <p class="py-1" v-text="$t('mj.tokenInfo2')" />
              <p class="text-right">
                <NButton type="info" size="small" @click="st.isShow = true">
                  {{
                    $t("setting.setting")
                  }}
                </NButton>
              </p>
            </div>
          </NPopover>
        </div>
      </div>
      <!-- Main text input with autocomplete suggestions from the parent -->
      <NAutoComplete
        v-model:value="mvalue"
        :options="searchOptions"
        :render-label="renderOption"
        :class="[!isMobile ? 'chat-input' : '']"
      >
        <template #default="{ handleInput, handleBlur, handleFocus }">
          <NInput
            ref="inputRef"
            v-model:value="mvalue"
            type="textarea"
            :placeholder="placeholder"
            rows="3"
            :autosize="{ minRows: 3, maxRows: 5 }"
            :theme-overrides="
              !isMobile
                ? {
                  border: '0',
                  borderHover: '#FFF',
                  borderFocus: '#FFF',
                  boxShadowFocus: '#FFF',
                }
                : {}
            "
            @input="handleInput"
            @focus="handleFocus"
            @blur="handleBlur"
            @keypress="handleEnter"
          >
            <template v-if="isMobile" #prefix>
              <!-- Upload button (mobile) -->
              <!-- NOTE(review): class "relative;" (with semicolon, here and
                   below) looks like a typo for "relative" — confirm before
                   fixing, the absolute children may depend on current layout -->
              <div class="relative; w-[22px]">
                <NTooltip trigger="hover">
                  <template #trigger>
                    <SvgIcon
                      v-if="st.isLoad == 1"
                      icon="line-md:uploading-loop"
                      class="absolute bottom-[10px] left-[8px] cursor-pointer"
                    />
                    <SvgIcon
                      v-else
                      icon="ri:attachment-line"
                      class="absolute bottom-[10px] left-[8px] cursor-pointer"
                      @click="fsRef.click()"
                    />
                  </template>
                  <div
                    v-if="canVisionModel(gptConfigStore.myData.model)"
                    v-html="$t('mj.upPdf')"
                  />
                  <div v-else v-html="$t('mj.upImg')" />
                </NTooltip>
              </div>
              <!-- Voice-input dropdown: whisper vs browser ASR -->
              <NDropdown
                trigger="hover"
                :options="drOption"
                @select="handleSelectASR"
              >
                <div class="relative; w-[22px]">
                  <!-- Pulsing red dot while recording -->
                  <div
                    v-if="st.micStart"
                    class="absolute bottom-[14px] left-[31px]"
                  >
                    <span class="relative flex h-3 w-3">
                      <span
                        class="animate-ping absolute inline-flex h-full w-full rounded-full bg-red-500 opacity-75"
                      />
                      <span
                        class="relative inline-flex rounded-full h-3 w-3 bg-red-400"
                      />
                    </span>
                  </div>

                  <SvgIcon
                    icon="bi:mic"
                    class="absolute bottom-[10px] left-[30px] cursor-pointer"
                  />
                </div>
              </NDropdown>
            </template>
            <template v-if="isMobile" #suffix>
              <!-- Send / stop button (mobile) -->
              <div class="relative; w-[40px]">
                <div class="absolute bottom-[-3px] right-[0px]">
                  <NButton
                    type="primary"
                    :disabled="disabled || homeStore.myData.isLoader"
                    @click="handleSubmit"
                  >
                    <template #icon>
                      <span class="dark:text-black">
                        <SvgIcon
                          v-if="homeStore.myData.isLoader"
                          icon="ri:stop-circle-line"
                        />
                        <SvgIcon v-else icon="ri:send-plane-fill" />
                      </span>
                    </template>
                  </NButton>
                </div>
              </div>
            </template>
          </NInput>
        </template>
      </NAutoComplete>

      <!-- Desktop toolbar below the input -->
      <!-- NOTE(review): IconSvg is used below but not imported in this SFC —
           presumably registered globally; verify -->
      <div v-if="!isMobile" class="top-bar">
        <div v-if="st" class="left">
          <!-- Current model / knowledge-base selector; opens settings modal -->
          <div
            v-if="homeStore.myData.local != 'draw'"
            class="chage-model-select"
            @click="st.isShow = true"
          >
            <template v-if="nGptStore.gpts">
              <SvgIcon icon="ri:apps-fill" />
              <span class="line-clamp-1 overflow-hidden">{{
                nGptStore.gpts.name
              }}</span>
            </template>
            <template v-else>
              <SvgIcon icon="heroicons:sparkles" />
              <span>模型:{{
                nGptStore.modelLabel
                  ? truncateText(nGptStore.modelLabel, 20)
                  : "deepseek/deepseek-r1"
              }}
                {{
                  nGptStore.kid
                    ? `知识库:${truncateText(nGptStore.kName, 10)}`
                    : ""
                }}</span>
            </template>
            <SvgIcon icon="icon-park-outline:right" />
          </div>
          <!-- Voice-input dropdown (desktop) -->
          <NDropdown
            trigger="hover"
            :options="drOption"
            @select="handleSelectASR"
          >
            <div class="relative; w-[22px]" style="margin: 0 12px">
              <!-- Pulsing red dot while recording -->
              <div
                v-if="st.micStart"
                class="absolute bottom-[14px] left-[31px]"
              >
                <span class="relative flex h-3 w-3">
                  <span
                    class="animate-ping absolute inline-flex h-full w-full rounded-full bg-red-500 opacity-75"
                  />
                  <span
                    class="relative inline-flex rounded-full h-3 w-3 bg-red-400"
                  />
                </span>
              </div>
              <IconSvg icon="voice" width="19px" height="19px" />
            </div>
          </NDropdown>
          <!-- Upload button (desktop) -->
          <NTooltip trigger="hover">
            <template #trigger>
              <SvgIcon
                v-if="st.isLoad == 1"
                icon="line-md:uploading-loop"
                class="absolute bottom-[10px] left-[8px] cursor-pointer"
              />
              <IconSvg
                v-else
                icon="upload"
                width="19px"
                height="19px"
                @click="fsRef.click()"
              />
            </template>
            <div
              v-if="canVisionModel(gptConfigStore.myData.model)"
              v-html="$t('mj.upPdf')"
            />
            <div v-else v-html="$t('mj.upImg')" />
          </NTooltip>
          <!-- Export (screenshot) and clear-conversation actions -->
          <IconSvg
            icon="screenshot"
            width="19px"
            height="19px"
            :style="{ marginLeft: '10px' }"
            @click="handleExport"
          />
          <IconSvg
            class="right"
            icon="clear"
            width="19px"
            height="19px"
            @click="handleClear"
          />
        </div>
        <div class="send" @click="handleSubmit">
          <IconSvg
            icon="send"
            style="margin-right: 0px !important"
            class="right"
            width="29px"
            height="19px"
          />
        </div>
      </div>
    </div>
  </div>

  <!-- Model/settings modal (shared by mobile badge and desktop selector) -->
  <NModal
    v-model:show="st.isShow"
    preset="card"
    :title="$t('mjchat.modelChange')"
    class="!max-w-[620px]"
    @close="st.isShow = false"
  >
    <aiModel @close="st.isShow = false" />
  </NModal>

  <!-- Prompt-store dialog (visibility controlled by `show`) -->
  <PromptStore v-model:visible="show" />
</template>

<style>
/* Light/dark theming for the input wrapper */
.myinputs .n-input .n-input-wrapper {
  display: flex;
  align-items: stretch;
  background: var(--n-color) !important; /* use Naive UI's theme color variable */
}

/* Dark-mode override */
html.dark .myinputs .n-input .n-input-wrapper,
body.dark .myinputs .n-input .n-input-wrapper {
  background: #232627!important; /* dark-mode background */
}
</style>
