package com.example.wechat.login.controller;

import com.example.wechat.login.dto.ApiResponse;
import com.example.wechat.login.dto.speech.SpeechRecognitionRequest;
import com.example.wechat.login.dto.speech.SpeechRecognitionResponse;
import com.example.wechat.login.dto.speech.SpeechTaskRequest;
import com.example.wechat.login.dto.speech.SpeechTaskResponse;
import com.example.wechat.login.entity.SpeechRecognitionHistory;
import com.example.wechat.login.mapper.SpeechRecognitionHistoryMapper;
import com.example.wechat.login.annotation.Idempotent;
import com.example.wechat.login.annotation.DistributedLock;
import com.example.wechat.login.service.SpeechRecognitionHistoryService;
import com.example.wechat.login.service.SpeechService;
import com.tencentcloudapi.common.exception.TencentCloudSDKException;
import lombok.extern.slf4j.Slf4j;
import org.springframework.format.annotation.DateTimeFormat;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
import java.time.LocalDateTime;
import java.util.Base64;
import java.util.List;

/**
 * Speech recognition controller.
 *
 * <p>Exposes REST endpoints for Tencent Cloud ASR: one-shot ("一句话") recognition from an
 * uploaded audio file, asynchronous recognition-task creation/query, and CRUD operations on the
 * per-user recognition history. The authenticated user's openid is read from the Spring Security
 * context (populated by the project's login filter as the principal — assumed to be a String,
 * as the original code casts it unconditionally).
 */
@RestController
@RequestMapping("/api/speech")
@Slf4j
public class SpeechController {

    private final SpeechService speechService;
    private final SpeechRecognitionHistoryService historyService;
    private final SpeechRecognitionHistoryMapper historyRepository;

    public SpeechController(SpeechService speechService,
                            SpeechRecognitionHistoryService historyService,
                            SpeechRecognitionHistoryMapper historyRepository) {
        this.speechService = speechService;
        this.historyService = historyService;
        this.historyRepository = historyRepository;
    }

    /**
     * One-shot speech recognition via multipart upload; also persists a history record for the
     * authenticated user.
     *
     * @param file               audio file to recognize
     * @param engineType         engine/language model, default {@code 16k_zh}
     * @param channelNum         channel count (accepted but currently not forwarded — see note)
     * @param filterDirty        1 to filter profanity, default 0
     * @param filterModal        1 to filter filler words, default 0
     * @param convertNumMode     1 to convert spoken numbers to Arabic digits, default 1
     * @param speakerDiarization speaker-diarization flag (accepted but currently not forwarded)
     * @param speakerNumber      expected speaker count (accepted but currently not forwarded)
     * @return recognition result wrapped in {@link ApiResponse}; an error response on I/O or
     *         Tencent Cloud SDK failure
     */
    @PostMapping(value = "/recognize", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
    public ApiResponse<SpeechRecognitionResponse> recognizeSpeech(@RequestParam("file") MultipartFile file,
                                                                  @RequestParam(value = "engineType", defaultValue = "16k_zh") String engineType,
                                                                  @RequestParam(value = "channelNum", defaultValue = "1") Integer channelNum,
                                                                  @RequestParam(value = "filterDirty", defaultValue = "0") Integer filterDirty,
                                                                  @RequestParam(value = "filterModal", defaultValue = "0") Integer filterModal,
                                                                  @RequestParam(value = "convertNumMode", defaultValue = "1") Integer convertNumMode,
                                                                  @RequestParam(value = "speakerDiarization", defaultValue = "0") Integer speakerDiarization,
                                                                  @RequestParam(value = "speakerNumber", defaultValue = "0") Integer speakerNumber) {
        try {
            log.info("接收到语音文件上传请求，文件名：{}", file.getOriginalFilename());

            // NOTE(review): channelNum / speakerDiarization / speakerNumber are accepted but never
            // copied onto SpeechRecognitionRequest — confirm whether the DTO supports them and
            // whether they should be forwarded to the Tencent API.
            SpeechRecognitionRequest request =
                    buildRecognitionRequest(file, engineType, filterDirty, filterModal, convertNumMode);

            SpeechRecognitionResponse response = speechService.recognizeSpeech(request);

            // Persist a history record for the authenticated user (taskId is null for
            // synchronous one-shot recognition).
            String openid = currentOpenid();
            if (openid != null && !openid.isEmpty()) {
                historyService.saveHistory(openid, null, response.getRequestId(), file);
                log.info("已保存语音识别历史记录，openid: {}", openid);
            }

            return ApiResponse.success(response);
        } catch (IOException e) {
            log.error("处理上传文件异常", e);
            return ApiResponse.error(e.getMessage());
        } catch (TencentCloudSDKException e) {
            // Bug fix: previously rethrown as a bare RuntimeException (HTTP 500 with no
            // structured body); now reported through the uniform ApiResponse error channel,
            // consistent with every other handler in this controller.
            log.error("调用腾讯云语音识别服务异常", e);
            return ApiResponse.error(e.getMessage());
        }
    }

    /**
     * One-shot speech recognition via multipart upload, returning the raw response entity
     * (no history persistence, no {@link ApiResponse} wrapper).
     *
     * @param file           audio file to recognize
     * @param engineType     engine/language model, default {@code 16k_zh}
     * @param filterDirty    1 to filter profanity, default 0
     * @param filterModal    1 to filter filler words, default 0
     * @param convertNumMode 1 to convert spoken numbers to Arabic digits, default 1
     * @return HTTP 200 with the recognition result, or HTTP 200 whose body carries an error
     *         message on failure (kept as-is for backward compatibility)
     */
    @PostMapping(value = "/recognize/upload", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
    public ResponseEntity<SpeechRecognitionResponse> recognizeSpeechByUpload(
            @RequestParam("file") MultipartFile file,
            @RequestParam(value = "engineType", defaultValue = "16k_zh") String engineType,
            @RequestParam(value = "filterDirty", defaultValue = "0") Integer filterDirty,
            @RequestParam(value = "filterModal", defaultValue = "0") Integer filterModal,
            @RequestParam(value = "convertNumMode", defaultValue = "1") Integer convertNumMode) {
        try {
            log.info("接收到语音文件上传请求，文件名：{}", file.getOriginalFilename());

            SpeechRecognitionRequest request =
                    buildRecognitionRequest(file, engineType, filterDirty, filterModal, convertNumMode);

            SpeechRecognitionResponse response = speechService.recognizeSpeech(request);
            return ResponseEntity.ok(response);
        } catch (IOException | TencentCloudSDKException e) {
            log.error("处理上传文件异常", e);
            SpeechRecognitionResponse response = SpeechRecognitionResponse.builder()
                    .message("处理上传文件异常: " + e.getMessage())
                    .build();
            return ResponseEntity.ok(response);
        }
    }

    /**
     * Creates an asynchronous speech-recognition task from either an uploaded file or an audio
     * URL, skipping creation when an identical upload already exists in the user's history.
     *
     * @param file           optional audio file (alternative to {@code audioUrl})
     * @param audioUrl       optional audio URL (alternative to {@code file})
     * @param engineType     engine/language model, default {@code 16k_zh}
     * @param filterDirty    1 to filter profanity, default 0
     * @param filterModal    1 to filter filler words, default 0
     * @param convertNumMode 1 to convert spoken numbers to Arabic digits, default 1
     * @return the created task (with task ID), a code-1 duplicate notice, or an error response
     *         when neither input source is supplied
     */
    @PostMapping("/task/create")
    @DistributedLock
    @Idempotent(expireTime = 2000)
    public ApiResponse<SpeechTaskResponse> createSpeechTask(@RequestParam(name = "file", required = false) MultipartFile file,
                                                            @RequestParam(name = "audioUrl", required = false) String audioUrl,
                                                            @RequestParam(value = "engineType", defaultValue = "16k_zh") String engineType,
                                                            @RequestParam(value = "filterDirty", defaultValue = "0") Integer filterDirty,
                                                            @RequestParam(value = "filterModal", defaultValue = "0") Integer filterModal,
                                                            @RequestParam(value = "convertNumMode", defaultValue = "1") Integer convertNumMode) {
        log.info("接收到创建语音识别任务请求");

        // Bug fix: `file` is optional (audioUrl is the alternative input source) but the old
        // code dereferenced it unconditionally, throwing an NPE on URL-only requests.
        if (file == null && (audioUrl == null || audioUrl.isEmpty())) {
            return ApiResponse.error("必须提供音频文件或音频URL");
        }

        String openid = currentOpenid();

        // Duplicate check only applies to file uploads. Bug fix: the old code used
        // MultipartFile.getName(), which returns the multipart form field name ("file"),
        // not the uploaded file's name — so the check never matched a real filename.
        if (file != null) {
            String fileName = file.getOriginalFilename();
            if (fileName != null
                    && historyRepository.existsByOpenidAndAudioFilePathContaining(openid, fileName)) {
                SpeechTaskResponse duplicate = SpeechTaskResponse.builder()
                        .code(1)
                        .message("当前用户和文件名已存在历史记录中")
                        .build();
                return ApiResponse.success(duplicate);
            }
        }

        SpeechTaskRequest request = new SpeechTaskRequest();
        request.setFile(file);
        request.setAudioUrl(audioUrl);
        request.setEngineType(engineType);
        request.setFilterDirty(filterDirty);
        request.setFilterModal(filterModal);
        request.setConvertNumMode(convertNumMode);

        SpeechTaskResponse response = speechService.createSpeechTask(request);
        log.info("已创建语音识别任务，任务ID：{}", response.getTaskId());

        // Persist a history record for the authenticated user.
        if (openid != null && !openid.isEmpty()) {
            historyService.saveHistory(openid, response.getTaskId(), response.getRequestId(), file);
            log.info("已保存语音识别历史记录，openid: {}", openid);
        }
        return ApiResponse.success(response);
    }

    /**
     * Queries the status/result of an asynchronous speech-recognition task.
     *
     * @param taskId task ID returned by {@link #createSpeechTask}
     * @return the task's current state
     */
    @GetMapping("/task/{taskId}")
    public ResponseEntity<SpeechTaskResponse> querySpeechTask(@PathVariable String taskId) {
        log.info("接收到查询语音识别任务请求，任务ID：{}", taskId);
        SpeechTaskResponse response = speechService.querySpeechTask(taskId);
        return ResponseEntity.ok(response);
    }

    /**
     * Derives the audio type (file extension) from a filename.
     *
     * <p>Bug fix: unknown extensions previously fell through to returning the whole lowercase
     * filename; now the bare extension is returned, with "wav" as the fallback when the name
     * is null/empty or has no extension.
     *
     * @param fileName original filename, may be null
     * @return lowercase extension without the dot, or "wav" when none can be derived
     */
    private String getAudioType(String fileName) {
        if (fileName == null || fileName.isEmpty()) {
            return "wav";
        }
        String lowerFileName = fileName.toLowerCase();
        int dot = lowerFileName.lastIndexOf('.');
        if (dot < 0 || dot == lowerFileName.length() - 1) {
            return "wav";
        }
        return lowerFileName.substring(dot + 1);
    }

    /**
     * Lists the authenticated user's speech-recognition history.
     *
     * @return the user's history records, or an error response on failure
     */
    @GetMapping("/history")
    public ApiResponse<List<SpeechRecognitionHistory>> getHistoryList() {
        try {
            List<SpeechRecognitionHistory> historyList = historyService.findByOpenid(currentOpenid());
            return ApiResponse.success(historyList);
        } catch (Exception e) {
            log.error("查询语音识别历史记录失败", e);
            return ApiResponse.error("查询语音识别历史记录失败: " + e.getMessage());
        }
    }

    /**
     * Queries a user's history records within a time range.
     *
     * <p>NOTE(review): openid is client-supplied here, so any authenticated caller can read
     * another user's history (IDOR). Consider deriving openid from the security context as
     * {@link #getHistoryList()} does.
     *
     * @param openid    user openid
     * @param startTime range start (ISO date-time)
     * @param endTime   range end (ISO date-time)
     * @return matching history records, or an error response on failure
     */
    @GetMapping("/history/timerange")
    public ApiResponse<List<SpeechRecognitionHistory>> getHistoryByTimeRange(
            @RequestParam String openid,
            @RequestParam @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) LocalDateTime startTime,
            @RequestParam @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) LocalDateTime endTime) {
        try {
            List<SpeechRecognitionHistory> historyList = historyService.findByOpenidAndTimeRange(openid, startTime, endTime);
            return ApiResponse.success(historyList);
        } catch (Exception e) {
            log.error("根据时间范围查询语音识别历史记录失败", e);
            return ApiResponse.error("根据时间范围查询语音识别历史记录失败: " + e.getMessage());
        }
    }

    /**
     * Fetches a single history record by ID.
     *
     * <p>NOTE(review): ownership is not verified — confirm whether cross-user reads are intended.
     *
     * @param id history record ID
     * @return the record, or an error response on failure
     */
    @GetMapping("/history/{id}")
    public ApiResponse<SpeechRecognitionHistory> getHistoryById(@PathVariable Long id) {
        try {
            SpeechRecognitionHistory history = historyService.findById(id);
            log.info("已查询到历史记录详情，ID: {}", id);
            return ApiResponse.success(history);
        } catch (Exception e) {
            log.error("查询语音识别历史记录详情失败", e);
            return ApiResponse.error("查询语音识别历史记录详情失败: " + e.getMessage());
        }
    }

    /**
     * Deletes a single history record by ID.
     *
     * <p>NOTE(review): ownership is not verified before deletion — confirm this is intended.
     *
     * @param id history record ID
     * @return success with empty body, or an error response on failure
     */
    @DeleteMapping("/history/{id}")
    public ApiResponse<Void> deleteHistory(@PathVariable Long id) {
        try {
            historyService.deleteById(id);
            log.info("已删除历史记录，ID: {}", id);
            return ApiResponse.success(null);
        } catch (Exception e) {
            log.error("删除语音识别历史记录失败", e);
            return ApiResponse.error("删除语音识别历史记录失败: " + e.getMessage());
        }
    }

    /**
     * Queries a user's history records by audio format.
     *
     * <p>NOTE(review): openid is client-supplied — same IDOR concern as
     * {@link #getHistoryByTimeRange}.
     *
     * @param openid      user openid
     * @param audioFormat audio format to match (e.g. "wav", "mp3")
     * @return matching history records, or an error response on failure
     */
    @GetMapping("/history/format")
    public ApiResponse<List<SpeechRecognitionHistory>> getHistoryByAudioFormat(
            @RequestParam String openid,
            @RequestParam String audioFormat) {
        try {
            List<SpeechRecognitionHistory> historyList = historyService.findByOpenidAndAudioFormat(openid, audioFormat);
            return ApiResponse.success(historyList);
        } catch (Exception e) {
            log.error("根据音频格式查询语音识别历史记录失败", e);
            return ApiResponse.error("根据音频格式查询语音识别历史记录失败: " + e.getMessage());
        }
    }

    /**
     * Queries a user's history records by recognition engine type.
     *
     * <p>NOTE(review): openid is client-supplied — same IDOR concern as
     * {@link #getHistoryByTimeRange}.
     *
     * @param openid     user openid
     * @param engineType engine type to match (e.g. "16k_zh")
     * @return matching history records, or an error response on failure
     */
    @GetMapping("/history/engine")
    public ApiResponse<List<SpeechRecognitionHistory>> getHistoryByEngineType(
            @RequestParam String openid,
            @RequestParam String engineType) {
        try {
            List<SpeechRecognitionHistory> historyList = historyService.findByOpenidAndEngineType(openid, engineType);
            return ApiResponse.success(historyList);
        } catch (Exception e) {
            log.error("根据引擎类型查询语音识别历史记录失败", e);
            return ApiResponse.error("根据引擎类型查询语音识别历史记录失败: " + e.getMessage());
        }
    }

    /**
     * Deletes multiple history records by ID.
     *
     * <p>Deletion is not transactional here: a failure mid-loop leaves earlier IDs deleted.
     *
     * @param ids history record IDs
     * @return success with empty body, or an error response on the first failure
     */
    @DeleteMapping("/history/batch")
    public ApiResponse<Void> batchDeleteHistory(@RequestBody List<Long> ids) {
        try {
            for (Long id : ids) {
                historyService.deleteById(id);
            }
            return ApiResponse.success(null);
        } catch (Exception e) {
            log.error("批量删除语音识别历史记录失败", e);
            return ApiResponse.error("批量删除语音识别历史记录失败: " + e.getMessage());
        }
    }

    /**
     * Deletes all history records of the authenticated user.
     *
     * @return success with empty body, or an error response on failure
     */
    @DeleteMapping("/history/user")
    public ApiResponse<Void> deleteUserHistory() {
        try {
            historyService.deleteByOpenid(currentOpenid());
            return ApiResponse.success(null);
        } catch (Exception e) {
            log.error("删除用户语音识别历史记录失败", e);
            return ApiResponse.error("删除用户语音识别历史记录失败: " + e.getMessage());
        }
    }

    /**
     * Deletes a user's history records within a time range.
     *
     * <p>NOTE(review): openid is client-supplied — same IDOR concern as
     * {@link #getHistoryByTimeRange}, but for a destructive operation.
     *
     * @param openid    user openid
     * @param startTime range start (ISO date-time)
     * @param endTime   range end (ISO date-time)
     * @return success with empty body, or an error response on failure
     */
    @DeleteMapping("/history/timerange")
    public ApiResponse<Void> deleteHistoryByTimeRange(
            @RequestParam String openid,
            @RequestParam @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) LocalDateTime startTime,
            @RequestParam @DateTimeFormat(iso = DateTimeFormat.ISO.DATE_TIME) LocalDateTime endTime) {
        try {
            historyService.deleteByOpenidAndTimeRange(openid, startTime, endTime);
            return ApiResponse.success(null);
        } catch (Exception e) {
            log.error("删除时间范围内的语音识别历史记录失败", e);
            return ApiResponse.error("删除时间范围内的语音识别历史记录失败: " + e.getMessage());
        }
    }

    /**
     * Builds a one-shot recognition request from an uploaded file: the audio payload is
     * Base64-encoded and the audio type is derived from the filename extension.
     *
     * @throws IOException if the multipart file's bytes cannot be read
     */
    private SpeechRecognitionRequest buildRecognitionRequest(MultipartFile file,
                                                             String engineType,
                                                             Integer filterDirty,
                                                             Integer filterModal,
                                                             Integer convertNumMode) throws IOException {
        SpeechRecognitionRequest request = new SpeechRecognitionRequest();
        request.setAudioBase64(Base64.getEncoder().encodeToString(file.getBytes()));
        request.setAudioType(getAudioType(file.getOriginalFilename()));
        request.setEngineType(engineType);
        request.setFilterDirty(filterDirty);
        request.setFilterModal(filterModal);
        request.setConvertNumMode(convertNumMode);
        return request;
    }

    /**
     * Reads the current user's openid from the Spring Security context. The login filter
     * stores it as the authentication principal (cast mirrors the original code).
     */
    private String currentOpenid() {
        UsernamePasswordAuthenticationToken authentication =
                (UsernamePasswordAuthenticationToken) SecurityContextHolder.getContext().getAuthentication();
        return (String) authentication.getPrincipal();
    }
}