import json
import os.path
import tempfile
from typing import Dict, Any

import numpy as np
import pandas as pd

# from core import Predictor
from .cut_agent import CutAgent
from utils.logger import get_logger
from utils.timer import Timer
from addict import Dict as ADict

logger = get_logger()


class AutoCutPredictor:
    """Pipeline stage that runs automatic cutting over a transcribed task.

    For each batch it loads the task's JSON metadata file, feeds the
    transcript / silence points / backtracking points to ``CutAgent``,
    writes the resulting cut table next to the source media as
    ``<path>-cut.csv``, and persists progress back into the metadata file.
    """

    def __init__(self, cfg, mode='real'):
        """Build the predictor from a config mapping.

        Args:
            cfg: config mapping; ``cfg.model.params`` must provide
                ``stopwords_path``, ``front_edgewords_path``,
                ``back_edgewords_path``, ``max_step``, ``model_name``
                and ``chunk_size``.
            mode: run mode tag; stored as-is (only ``'real'`` is seen here).
        """
        self.cfg = ADict(cfg)
        self.mode = mode
        self.params = self.cfg.model.params
        self.stopwords_path = self.params.stopwords_path
        self.front_edgewords_path = self.params.front_edgewords_path
        self.back_edgewords_path = self.params.back_edgewords_path
        self.model_name = self.params.model_name
        self.chunk_size = self.params.chunk_size
        self.cut_agent = CutAgent(self.stopwords_path, self.front_edgewords_path, self.back_edgewords_path,
                                  self.params.max_step, self.model_name, self.chunk_size)
        self.timer = Timer()

    def __call__(self, batch: Dict[str, Any]) -> Dict:
        """Run autocut for one task and update its metadata file in place.

        Args:
            batch: must contain ``task_id``, ``meta_path`` (JSON metadata
                file) and ``backtracking_points``.

        Returns:
            The (unmodified) ``batch`` dict; results and progress are
            recorded in the metadata file, not in the return value.
        """
        task_id = batch['task_id']
        logger.info(f"processing meta path for {batch['meta_path']}")
        try:
            with open(batch['meta_path'], 'r', encoding='utf-8') as f:
                meta_data = json.load(f)
        except FileNotFoundError:
            # Without metadata there is nothing to update; bail out early.
            logger.error(f"could not find meta file for {task_id}")
            return batch
        try:
            transcribe_progress = meta_data.get('transcribe_progress', None)
            if transcribe_progress != "completed":
                # Upstream transcription not finished: mark as failed.
                # Fall through so the status is persisted below (an early
                # return here would lose the "failed" state).
                meta_data["autocut_progress"] = "failed"
                meta_data["autocut_path"] = None
            else:
                backtracking_points: list = batch['backtracking_points']
                silent_points = np.load(meta_data['silent_points_path'])
                logger.info(f"start autocut for task {task_id}")

                transcript: pd.DataFrame = pd.read_csv(meta_data['transcript_saved_path'])
                # Cut table is written next to the source media file.
                autocut_path = os.path.splitext(meta_data['path'])[0] + '-cut.csv'
                extra_df = self.cut_agent(transcript, silent_points, backtracking_points)
                extra_df.to_csv(autocut_path, index=False)
                meta_data["autocut_path"] = autocut_path
                meta_data["autocut_progress"] = "completed"
                logger.info(f"autocut for task {task_id} completed")
        except Exception:
            # Any failure inside the cut pipeline marks the task failed;
            # never let it crash the surrounding batch loop.
            import traceback
            error_info = traceback.format_exc()
            logger.error(f"error occurs in autocut: {error_info}")
            meta_data["autocut_progress"] = "failed"
            meta_data["autocut_path"] = None
        # Persist the (possibly failed) status back to the metadata file.
        meta_data['update_status'] = 'autocut'
        with open(batch["meta_path"], "w", encoding="utf-8") as f:
            json_data = json.dumps(meta_data, indent=4, ensure_ascii=True)
            f.write(json_data)
        return batch
        
