import base64
import json
import os
import tempfile

import numpy as np
import scipy.io.wavfile as wavefile
import whisper
from django.http.request import QueryDict
from django.shortcuts import render
from rest_framework import status, viewsets
from rest_framework.response import Response

from voiceprint_recognition.voiceprint_recognition import VoiceprintRecognition

from .models import audio, recorder, test, voiceprint
from .serializers import (AudioSerializer, RecorderSerializer,
                          TestSerializer, VoiceprintSerializer)

"""
GET     /books/      提供所有记录
POST    /books/      新增一条记录
GET     /books/<pk>/ 提供指定id的记录
PUT     /books/<pk>/ 修改指定id的记录
DELETE  /books/<pk>/ 删除指定id的记录

响应数据 JSON
# 列表视图： 路由后面没有pk/ID
# 详情视图： 路由后面 pk/ID

"""


class AudioViewSet(viewsets.ModelViewSet):
    """
    CRUD API for ``audio`` records, newest first.

    ``create`` additionally runs Whisper speech recognition on the upload(s):

    * ``method=post_db``  -> transcribe every uploaded file and persist one
      record per file (201 with the last record's data).
    * no ``method`` field -> transcribe only (single file supported) and
      return the transcription without saving anything (200).
    """
    queryset = audio.objects.all().order_by('-posttime')
    serializer_class = AudioSerializer
    # Whisper model size trade-off: 'tiny' 'base' 'small' 'medium' 'large'
    whisperModel = whisper.load_model("tiny")

    def _transcribe_upload(self, memoryFile):
        """Spill an in-memory upload to a temp file and run Whisper on it.

        Returns the recognized text. The temp file is always removed, even
        if transcription raises. A unique temp name avoids the race the
        original fixed ``tmp.wav`` name had under concurrent requests.
        """
        fd, tmpName = tempfile.mkstemp(suffix='.wav')
        try:
            with os.fdopen(fd, 'wb') as tmpFile:
                for chunk in memoryFile.chunks():
                    tmpFile.write(chunk)
            result = self.whisperModel.transcribe(tmpName)
            return result["text"]
        finally:
            os.remove(tmpName)

    def create(self, request, *args, **kwargs):
        """Transcribe uploaded audio; optionally persist it (see class doc)."""
        memoryFileList = request.FILES.getlist('audio')
        if not memoryFileList:
            # Previously an empty upload list caused a NameError (post_db)
            # or an implicit None return; fail explicitly instead.
            return Response({"detail": "no 'audio' file uploaded"},
                            status=status.HTTP_400_BAD_REQUEST)

        if "method" in request.data.keys():
            if request.data["method"] != "post_db":
                # An unknown method previously fell through and returned
                # None, which Django turns into a 500.
                return Response({"detail": "unsupported method"},
                                status=status.HTTP_400_BAD_REQUEST)
            # Transcribe every file and store one record per file.
            for memoryFile in memoryFileList:
                # The file name (without extension) doubles as the user name.
                username = os.path.splitext(
                    os.path.basename(memoryFile.name))[0]
                newData = QueryDict(mutable=True)
                newData.update({"user": username})
                newData.update({"content": self._transcribe_upload(memoryFile)})
                newData.update({"audio": memoryFile})
                serializer = self.get_serializer(data=newData)
                serializer.is_valid(raise_exception=True)
                self.perform_create(serializer)
                headers = self.get_success_headers(serializer.data)
            # NOTE: as in the original, only the last file's data is echoed.
            return Response(serializer.data, status=status.HTTP_201_CREATED,
                            headers=headers)

        # Recognition only — single file supported; transcribe the first
        # upload and return the text without persisting anything.
        memoryFile = memoryFileList[0]
        username = os.path.splitext(os.path.basename(memoryFile.name))[0]
        newData = QueryDict(mutable=True)
        newData.update({"user": username})
        newData.update({"content": self._transcribe_upload(memoryFile)})
        return Response(newData, status=status.HTTP_200_OK)

    def destroy(self, request, *args, **kwargs):
        """Delete the record AND the underlying stored audio file."""
        instance = self.get_object()
        instance.audio.delete()  # remove the file from storage first
        self.perform_destroy(instance)
        return Response(status=status.HTTP_204_NO_CONTENT)


class VoiceprintViewSet(viewsets.ModelViewSet):
    """
    CRUD API for ``voiceprint`` records plus voiceprint operations.

    ``create`` dispatches on the ``method`` form field:

    * ``get_feature`` -> transcribe one uploaded file and extract its
      voiceprint feature; return both without storing anything (200).
    * ``post_db``     -> transcribe + extract features for every uploaded
      file and persist one record per file (201).
    * ``compare_1v1`` -> similarity score of two uploaded files (200).
    * ``compare_1vN`` -> best match of one uploaded file against all
      stored voiceprints (200).
    """
    queryset = voiceprint.objects.all().order_by('-posttime')
    serializer_class = VoiceprintSerializer
    # Reuse the (expensive) Whisper model already loaded by AudioViewSet.
    whisperModel = AudioViewSet.whisperModel
    voiceprint_recognition = VoiceprintRecognition()

    @staticmethod
    def _dump_upload(memoryFile, tmpName):
        """Write an uploaded in-memory file to ``tmpName`` on disk."""
        with open(tmpName, 'wb+') as tmpFile:
            for chunk in memoryFile.chunks():
                tmpFile.write(chunk)

    def create(self, request, *args, **kwargs):
        """Dispatch on ``method`` (see class docstring)."""
        # .get() instead of membership test + pop: the pop() calls in the
        # original mutated request.data to no effect (the response was
        # built from a fresh dict) and can raise on an immutable QueryDict.
        method = request.data.get("method")

        if method == "get_feature":
            memoryFile = request.FILES['audio']
            tmpName = 'tmp1.wav'
            self._dump_upload(memoryFile, tmpName)
            result = self.whisperModel.transcribe(tmpName)
            # BUG FIX: was ``self.voiceprint.get_feature`` — an
            # AttributeError; the extractor lives on
            # ``self.voiceprint_recognition`` (cf. the post_db branch).
            feature = self.voiceprint_recognition.get_feature(tmpName)[0]
            # Decode so the JSON renderer receives str, not bytes
            # (consistent with the post_db branch).
            base64_fea = base64.b64encode(feature).decode('utf-8')
            data = {"feature": base64_fea, "content": result["text"]}
            return Response(data, status=status.HTTP_200_OK)

        if method == "post_db":
            for memoryFile in request.FILES.getlist('audio'):
                # The file name (without extension) doubles as the user name.
                username = os.path.splitext(
                    os.path.basename(memoryFile.name))[0]
                tmpName = 'tmp1.wav'
                self._dump_upload(memoryFile, tmpName)
                result = self.whisperModel.transcribe(tmpName)
                feature = self.voiceprint_recognition.get_feature(tmpName)[0]

                newData = QueryDict(mutable=True)
                newData.update({"user": username})
                newData.update({"audio": memoryFile})
                newData.update({'content': result['text']})
                newData.update(
                    {'voiceprint': base64.b64encode(feature).decode('utf-8')})
                serializer = self.get_serializer(data=newData)
                serializer.is_valid(raise_exception=True)
                self.perform_create(serializer)
            # The original accumulated nothing into res_failed; an empty
            # body with 201 preserves that contract.
            return Response({}, status=status.HTTP_201_CREATED)

        # 1v1 comparison: similarity between two uploaded audio files.
        if method == "compare_1v1":
            if not ("audio1" in request.data.keys()
                    and "audio2" in request.data.keys()):
                return Response({}, status=status.HTTP_400_BAD_REQUEST)
            tmpName1, tmpName2 = "tmp1.wav", "tmp2.wav"
            self._dump_upload(request.data["audio1"], tmpName1)
            self._dump_upload(request.data["audio2"], tmpName2)
            sim = self.voiceprint_recognition.compare_1v1(tmpName1, tmpName2)
            return Response({"score": sim}, status=status.HTTP_200_OK)

        # 1vN comparison: top-1 match against every stored voiceprint.
        if method == "compare_1vN":
            # Named ``upload`` to avoid shadowing the imported ``audio`` model.
            upload = request.FILES['audio']
            tmpName = 'tmp1.wav'
            self._dump_upload(upload, tmpName)
            query_fea = self.voiceprint_recognition.get_feature(tmpName)[0]

            best_user = None
            best_score = 0
            for instance in voiceprint.objects.all():
                db_fea = np.frombuffer(
                    base64.b64decode(instance.voiceprint), np.float32)
                if len(db_fea) != 192:
                    continue  # skip malformed / differently-sized entries
                # Cosine similarity between query and stored feature.
                sim = np.dot(query_fea, db_fea) / (
                    np.linalg.norm(query_fea) * np.linalg.norm(db_fea))
                if sim > best_score:
                    best_score = sim
                    best_user = instance.user
            if best_user is not None:
                return Response({"score": best_score, "user": best_user},
                                status=status.HTTP_200_OK)
            return Response({})

        # Missing or unknown method previously fell through and returned
        # None (a 500); make the contract explicit.
        return Response({"detail": "unsupported or missing 'method'"},
                        status=status.HTTP_400_BAD_REQUEST)


class RecorderViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for ``recorder`` records, newest first."""

    serializer_class = RecorderSerializer
    queryset = recorder.objects.all().order_by('-posttime')


class TestViewSet(viewsets.ModelViewSet):
    """Standard CRUD endpoints for ``test`` records, newest first."""

    serializer_class = TestSerializer
    queryset = test.objects.all().order_by('-posttime')
