# -*- coding: utf-8 -*-
# ==========================
# @Time    : 2023/12/6
# @Author  : zhoutengwei
# @File    : storage.py
# ==========================
import io
import json
import uuid

import cv2
import requests
import torch
import copy
import zhipuai
import tempfile
import mimetypes
import numpy as np
import pyarrow as pa

from pyarrow import parquet as pq
from .funasr import FunasrInfer
from .speaker import Speaker
from storage import MinioStorage
from torchaudio.io import StreamReader
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from rapidocr_onnxruntime import RapidOCR, VisRes
from datetime import datetime
from loguru import logger
from typing import Dict, Any, List

class SpeakerPredictor:
    """Extract speaker-embedding vectors from an audio source in streamed chunks."""

    def __init__(self, model_path: str, sample_rate: int, stride: int, interval: int):
        """
        Args:
            model_path: path to the speaker-embedding ONNX model.
            sample_rate: sample rate the decoded audio is resampled to.
            stride: per-unit frame count; ``stride * interval`` frames per chunk.
            interval: chunk-size multiplier (with ``stride``).
        """
        self.stride = stride
        self.interval = interval
        self.sample_rate = sample_rate
        self.model = Speaker(onnx_path=model_path)
        logger.info("init model success")

    def __call__(self, batch: Dict[Any, np.ndarray]) -> Dict:
        """Decode ``batch['filename']`` chunk by chunk and return its embeddings.

        Args:
            batch: record with at least ``'filename'`` (path/URL decodable by
                torchaudio's StreamReader) and ``'ID'`` keys.

        Returns:
            dict with the record id under ``"uuid"`` and the concatenated
            per-chunk embeddings (``np.ndarray``) under ``"prediction"``.

        Raises:
            ValueError: from ``np.concatenate`` if the stream yields no chunks.
        """
        streamer = StreamReader(batch['filename'])
        streamer.add_basic_audio_stream(
            frames_per_chunk=self.stride * self.interval,
            sample_rate=self.sample_rate,
            format="s16p",  # signed 16-bit planar samples
            num_channels=1,  # mono
            decoder_option={"threads": "0"}  # "0" lets the decoder pick thread count
        )
        embs = []
        for chunk in streamer.stream():
            # chunk[0].data is (frames, channels); transpose to (channels, frames)
            # before handing it to the embedding model as float32.
            data = chunk[0].data.T
            pred = self.model.extract_embedding(data.to(torch.float32))
            embs.append(pred)
        prediction = np.concatenate(embs)
        logger.info(prediction.shape)
        return {"uuid": batch['ID'],
                'prediction': prediction}


def replace_content_in_file(file_path, replacement, placeholder='{input}'):
    """Read *file_path* and return its content with *placeholder* substituted.

    NOTE: despite the name, the file on disk is NOT modified; the substituted
    text is only returned to the caller.

    Args:
        file_path: path of a UTF-8 text file to read.
        replacement: text inserted wherever *placeholder* occurs.
        placeholder: marker string to replace. Defaults to ``'{input}'`` so
            existing two-argument callers keep their behavior.

    Returns:
        The file content with every occurrence of *placeholder* replaced.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        content = file.read()
    return content.replace(placeholder, replacement)


class ASRPredictor:
    """Transcribe a media URL with FunASR, upload the CSV transcript to object
    storage, and POST a progress callback to the task's ``cb_url``."""

    def __init__(self, model_path, chunk_size, chunk_interval,
                 sample_rate, interval, min_silent_time, storage_obj):
        """
        Args:
            model_path: modelscope ASR model id/path.
            chunk_size: comma-separated ints (e.g. "5,10,5"); index 1 sizes the stride.
            chunk_interval: divisor in the stride computation.
            sample_rate: audio sample rate in Hz.
            interval: forwarded to FunasrInfer.
            min_silent_time: minimum silence (forwarded to FunasrInfer) used to split segments.
            storage_obj: storage client exposing fput_object / presigned_get_object.
        """
        chunk_size = [int(x) for x in chunk_size.split(",")]
        self.sample_rate = sample_rate
        # Byte stride per decode step: 60ms * chunk_size[1] windows at
        # 16-bit (2-byte) mono samples.
        self.stride = int(60 * chunk_size[1] / chunk_interval / 1000 * self.sample_rate * 2)
        self.interval = interval
        self.min_silent_time = min_silent_time
        self.inference_pipeline = pipeline(
            task=Tasks.auto_speech_recognition,
            model=model_path,
        )
        self.model = FunasrInfer(self.inference_pipeline, self.stride,
                                 self.interval, self.min_silent_time)
        self.storage_obj = storage_obj
        logger.info("init model success")

    def __call__(self, batch: Dict[Any, np.ndarray]) -> Dict:
        """Run ASR on ``batch['link'][0]``; upload CSV and fire the callback.

        Args:
            batch: record with 'link' (list of URLs), 'aweme_id', 'task_id'
                and 'cb_url' keys.

        Returns:
            dict with 'msg' ("success"/"failed"), 'aweme_id' and 'file_links'.
        """
        file_url = batch['link'][0]
        try:
            df = self.model(file_url)
            result = df.reset_index()
            with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=True) as temp_file:
                result.to_csv(temp_file.name, index=False)
                self.storage_obj.fput_object(bucket_name="specific-inspection-transcribe",
                                             object_name=batch['aweme_id'] + ".csv",
                                             file_name=temp_file.name)
                # NOTE(review): the third positional arg (7) is presumably an
                # expiry (days?) — confirm against the storage client's signature.
                presigned_url = self.storage_obj.presigned_get_object("specific-inspection-transcribe",
                                                                      batch['aweme_id'] + ".csv", 7)
                dataTypeToActions = {
                    'base_info': batch,  # basic record info
                    'screen_shot': None,  # screenshots + OCR (not produced here)
                    'generate_txt': presigned_url,  # transcript download link
                    'status': "processing"  # pipeline status
                }
                status_data = {
                    "ts": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    "task_id": batch['task_id'],
                    "status": "processing",
                    "type": "generate_txt",
                    "data": dataTypeToActions
                }
                # Explicit UTF-8: payload contains non-ASCII text and is dumped
                # with ensure_ascii=False, so the platform default encoding
                # must not be relied on.
                with open("generate_txt.json", 'w', encoding='utf-8') as f:
                    json.dump(status_data, f, ensure_ascii=False)
                try:
                    # Timeout prevents a dead callback endpoint from hanging
                    # the whole worker.
                    resp = requests.post(batch['cb_url'], json=status_data, timeout=30)
                    if resp.status_code == 200:
                        logger.info(f"post {batch['cb_url']} success!")
                    else:
                        logger.warning(f"callback returned status {resp.status_code}")
                except requests.RequestException as e:
                    # Best-effort callback: log and continue rather than fail the task.
                    logger.error(f"callback_url is not valid: {e}")
            return {"msg": "success", "aweme_id": batch['aweme_id'], "file_links": batch['link']}
        except Exception as e:
            # logger.exception keeps the traceback, unlike logger.error(e).
            logger.exception(e)
            return {"msg": "failed", "aweme_id": batch['aweme_id'], "file_links": batch['link']}


class OCRPredictor:
    """Grab up to 7 frames from a video URL, OCR each frame, upload annotated
    screenshots to object storage, and POST a progress callback."""

    def __init__(self, font_path, storage_obj):
        """
        Args:
            font_path: font file used by VisRes to draw recognized text.
            storage_obj: storage client exposing put_object / presigned_get_object.
        """
        self.engine = RapidOCR()
        self.vis = VisRes()
        self.font_path = font_path
        self.storage_obj = storage_obj

    def __call__(self, batch: Dict[Any, np.ndarray]) -> Dict:
        """OCR frames of the first non-mp3 link in ``batch['link']``.

        Args:
            batch: record with 'link' (list of URLs), 'aweme_id', 'task_id'
                and 'cb_url' keys. At least one non-.mp3 link is assumed
                present — an all-audio batch would raise IndexError here.

        Returns:
            dict with 'msg' ("success"), 'aweme_id' and 'file_links'.
        """
        file_url = [file for file in batch['link'] if not file.endswith(".mp3")][0]
        logger.info(file_url)
        streamer = StreamReader(file_url)
        streamer.add_basic_video_stream(
            frames_per_chunk=1,
            frame_rate=1,  # sample one frame per second of video
            format="bgr24"  # OpenCV-native channel order
        )
        count = 0
        screen_shots = []
        for chunks in streamer.stream():
            # (1, C, H, W) tensor -> (H, W, C) uint8 image for OpenCV/OCR.
            data = chunks[0].data.permute(0, 2, 3, 1).squeeze(dim=0).numpy()
            logger.info(data.shape)
            if count > 6:  # cap at 7 processed frames
                break
            count += 1
            try:
                result, elapse_list = self.engine(data)
                if result is None:
                    continue
                boxes, txts, scores = list(zip(*result))
                logger.info(txts)
                # Draw the recognized boxes/text onto the frame.
                img = self.vis(data, boxes, txts, scores, self.font_path)
                _, img_encoded = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, 90])
                img_byte_arr = io.BytesIO(img_encoded.tobytes())

                self.storage_obj.put_object(bucket_name="ocr-images",
                                            object_name=batch['aweme_id'] + f"_{count}.jpg",
                                            data=img_byte_arr,
                                            mtype=mimetypes.types_map[".jpg"])
                presigned_url = self.storage_obj.presigned_get_object("ocr-images",
                                                                      batch['aweme_id'] + f"_{count}.jpg", 7)
                logger.info(presigned_url)
                screen_shots.append(presigned_url)
            except Exception as e:
                # Per-frame best effort: a failed frame must not abort the batch.
                logger.exception(e)
        dataTypeToActions = {
            'base_info': batch,  # basic record info
            'screen_shot': screen_shots,  # uploaded screenshot links
            'generate_txt': None,  # transcript (not produced here)
            'status': "processing"  # pipeline status
        }
        status_data = {
            "ts": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "task_id": batch['task_id'],
            "status": "processing",
            "type": "screen_shot",
            "data": dataTypeToActions
        }
        # Explicit UTF-8 since json.dump uses ensure_ascii=False.
        with open("screen_shot.json", 'w', encoding='utf-8') as f:
            json.dump(status_data, f, ensure_ascii=False)
        try:
            # Timeout prevents a dead callback endpoint from hanging the worker.
            resp = requests.post(batch['cb_url'], json=status_data, timeout=30)
            if resp.status_code == 200:
                logger.info(f"post {batch['cb_url']} success!")
            else:
                logger.warning(f"callback returned status {resp.status_code}")
        except requests.RequestException as e:
            # Best-effort callback: log and continue rather than fail the task.
            logger.error(f"callback_url is not valid: {e}")
        return {"msg": "success", "aweme_id": batch['aweme_id'], "file_links": batch['link']}


class UnseekableWrapper:
    """Expose only ``read`` of a wrapped file-like object, hiding seek/tell.

    Useful to force a consumer (e.g. a streaming decoder) down its
    non-seekable code path.
    """

    def __init__(self, obj):
        # Underlying readable object; only its read() is forwarded.
        self.obj = obj

    def read(self, n=-1):
        """Read up to *n* bytes/chars from the wrapped object.

        *n* defaults to -1 ("read to EOF") to match the standard file-object
        protocol; existing positional callers are unaffected.
        """
        return self.obj.read(n)