import json
import os
import tempfile
import traceback
from typing import Any, Dict

import numpy as np
import pandas as pd
from addict import Dict as ADict

from core import Predictor
from utils.logger import get_logger

from .asr_infer import Transcriber

logger = get_logger()


class ASRPredictor:
    """Runs ASR transcription for a single audio task.

    Wraps a ``Transcriber`` model built from the task config. ``__call__``
    consumes a task ``batch`` dict, writes the transcript CSV next to the
    source audio file, and records progress/metadata back into the batch.
    """

    def __init__(self, cfg: Dict):
        """Build the transcriber from ``cfg.model.params``.

        Args:
            cfg: config mapping; ``model.params`` must provide
                ``sample_rate``, ``max_step``, ``merge_thr`` and
                ``batch_size_threshold_s``.
        """
        self.cfg = ADict(cfg)
        self.params = self.cfg.model.params
        self.model = Transcriber(
            self.params.sample_rate,
            self.params.max_step,
            self.params.merge_thr,
            self.params.batch_size_threshold_s,
        )

    def __call__(self, batch: Dict[str, Any]) -> Dict:
        """Transcribe the audio referenced by ``batch`` and update its status.

        Args:
            batch: task state dict; reads ``embedding_progress``, ``path``,
                ``silent_points_path``, ``duration`` and ``meta_path``.

        Returns:
            The same ``batch`` dict, updated in place with
            ``transcribe_progress`` ("failed"/"processing"/"completed"),
            ``transcript_saved_path`` (CSV path or ``None`` on failure) and
            ``update_status``.
        """
        try:
            # Transcription requires a completed embedding stage upstream.
            if batch["embedding_progress"] != "completed":
                batch["transcribe_progress"] = "failed"
                return batch

            batch["transcribe_progress"] = "processing"
            logger.info(f"start transcribe audio: {batch['path']}")
            silent_points = np.load(batch['silent_points_path']).tolist()
            duration = batch['duration']
            # The transcript CSV is saved alongside the source audio file.
            transcript_saved_path = os.path.splitext(batch['path'])[0] + "-transcript.csv"
            outputs = self.model(silent_points, batch['path'], duration)
            # Build the frame in one shot instead of O(n^2) row-wise appends;
            # each model output is one (text, start, end, spk_id) row.
            df = pd.DataFrame(list(outputs), columns=["text", "start", "end", "spk_id"])
            df.to_csv(transcript_saved_path, index=False)
            batch["transcript_saved_path"] = transcript_saved_path
            batch["transcribe_progress"] = "completed"
            logger.info(f"transcribe for audio {batch['path']} completed")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; log the traceback and mark the task failed
            # rather than crashing the pipeline.
            error_info = traceback.format_exc()
            logger.error(f"error occurs in audio {batch['path']} transcribe: {error_info}")
            batch["transcribe_progress"] = "failed"
            batch["transcript_saved_path"] = None
        batch["update_status"] = "transcribe"
        # Persist the updated task metadata regardless of success/failure.
        with open(batch["meta_path"], "w", encoding="utf-8") as f:
            json_data = json.dumps(batch, indent=4, ensure_ascii=True)
            f.write(json_data)
        return batch
