import os
import shutil
import json
from typing import List, Dict, Optional
from PIL import Image
from .img import img2base64

import argparse
from tqdm import tqdm
import lmdb
import pickle
import sys

sys.path.append("..")
from CLIP.feature_extractor import (
    ImageConfig,
    extract_image_features,
)


class ImgDB:
    """Image database for retrieval evaluation.

    Indexes the raw images under ``root_path`` into a base64 TSV plus a JSON
    id->path dict, serializes them into an LMDB store, and extracts CLIP
    image features.

    Layout under ``root_path``:
        raw/                 -- source image files
        eval_imgs.tsv        -- one "<img_id>\\t<base64>" line per image
        eval_imgs.dict       -- JSON mapping img_id -> image path
        eval.img_feat.jsonl  -- extracted image features
        tsv.hash             -- md5 of the tsv, used to skip re-initialization
        lmdb/eval/imgs       -- LMDB serialization of the tsv
    """

    def __init__(
        self,
        root_path: Optional[str] = None,
        img_feat_path: Optional[str] = None,
    ):
        """Set up paths and initialize the database.

        If eval_imgs.tsv and tsv.hash both exist and the hash matches, the
        stored index is loaded as-is; otherwise the tsv/dict/LMDB store and
        the image features are rebuilt from the raw image directory.

        Args:
            root_path: Root data directory. Defaults to
                ``<package parent>/data/imgs``.
            img_feat_path: Output path for the extracted image features.
                Defaults to ``<root_path>/eval.img_feat.jsonl``.
        """
        self.root_path = root_path or os.path.join(
            os.path.dirname(os.path.dirname(__file__)), "data", "imgs"
        )
        self.img_dir = os.path.join(self.root_path, "raw")
        self.tsv_path = os.path.join(self.root_path, "eval_imgs.tsv")
        self.dict_path = os.path.join(self.root_path, "eval_imgs.dict")
        self.img_feat_path = img_feat_path or os.path.join(
            self.root_path, "eval.img_feat.jsonl"
        )
        self.tsv_hash_path = os.path.join(self.root_path, "tsv.hash")
        self.imgid2path = {}
        if self._tsv_and_hash_exist_and_match():
            # tsv.hash and the tsv agree: the stored index is current,
            # so skip the (expensive) rebuild and feature extraction.
            self._load_imgid2path()
        else:
            self._init_db()
            self._init_imgfeat()
            self._save_tsv_hash()

    # TODO: support selecting other feature-extraction models.
    def _init_imgfeat(self):
        """Extract image features from the LMDB store with Chinese-CLIP RN50."""
        extract_image_features(
            ImageConfig(
                image_data=self.root_path + "/lmdb/eval/imgs",
                output=self.img_feat_path,
                resume="data/pretrained_weights/clip_cn_rn50.pt",
                vision_model="RN50",
                text_model="RBT3-chinese",
                img_batch_size=500,
                gpu=0,
                debug=False,
            )
        )

    def _init_db(self):
        """Scan ``img_dir`` and (re)build eval_imgs.tsv, eval_imgs.dict and
        the LMDB store.

        Files PIL cannot decode are skipped; ids are contiguous integers
        starting at 0, assigned in sorted filename order for determinism.
        """
        self.imgid2path = {}
        next_id = 0
        with open(self.tsv_path, "w", encoding="utf-8") as ftsv, open(
            self.dict_path, "w", encoding="utf-8"
        ) as fdict:
            for fname in sorted(os.listdir(self.img_dir)):
                img_path = os.path.join(self.img_dir, fname)
                if not os.path.isfile(img_path):
                    continue
                try:
                    # Context manager closes the PIL file handle promptly.
                    with Image.open(img_path) as img:
                        b64 = img2base64(img)
                except Exception:
                    # Best-effort scan: skip unreadable/non-image files.
                    continue
                ftsv.write(f"{next_id}\t{b64}\n")
                self.imgid2path[str(next_id)] = img_path
                next_id += 1
            json.dump(self.imgid2path, fdict, ensure_ascii=False)
        self._build_lmdb()

    def delete_image(self, img_id_or_path: str) -> bool:
        """Delete an image, addressed either by id or by file path, and keep
        eval_imgs.tsv / eval_imgs.dict in sync.

        Returns:
            True on success, False when the id/path is unknown.
        """
        if os.path.isfile(img_id_or_path):
            # A path was given: reverse-lookup its id.
            img_id = None
            for key, path in self.imgid2path.items():
                if path == img_id_or_path:
                    img_id = key
                    break
            if img_id is None:
                return False
        else:
            img_id = str(img_id_or_path)
            if img_id not in self.imgid2path:
                return False
        img_path = self.imgid2path[img_id]
        try:
            os.remove(img_path)
        except OSError:
            # File may already be gone; still drop it from the index.
            pass
        del self.imgid2path[img_id]
        # Rewrite the tsv keeping the ORIGINAL ids so they stay aligned with
        # the keys in eval_imgs.dict. (A previous implementation renumbered
        # rows with enumerate(), desynchronizing the tsv from the dict.)
        with open(self.tsv_path, "w", encoding="utf-8") as ftsv:
            for key, path in self.imgid2path.items():
                try:
                    with Image.open(path) as img:
                        b64 = img2base64(img)
                except Exception:
                    continue
                ftsv.write(f"{key}\t{b64}\n")
        with open(self.dict_path, "w", encoding="utf-8") as fdict:
            json.dump(self.imgid2path, fdict, ensure_ascii=False)
        # TODO: update the LMDB database as well.
        return True

    def add_image(self, img_path: str) -> Optional[str]:
        """Copy the image at ``img_path`` into the database and register it.

        Returns:
            The newly assigned image id, or None on failure (missing source
            file or undecodable image). On failure no partial state is left
            behind: the copied file is removed and the index is untouched.
        """
        if not os.path.isfile(img_path):
            return None
        new_id = str(max([int(k) for k in self.imgid2path] + [0]) + 1)
        ext = os.path.splitext(img_path)[1]
        dst_path = os.path.join(self.img_dir, f"{new_id}{ext}")
        shutil.copy2(img_path, dst_path)
        # Validate/encode BEFORE mutating the index, so a bad image cannot
        # leave a dangling dict entry or orphaned copy behind.
        try:
            with Image.open(dst_path) as img:
                b64 = img2base64(img)
        except Exception:
            try:
                os.remove(dst_path)
            except OSError:
                pass
            return None
        self.imgid2path[new_id] = dst_path
        with open(self.tsv_path, "a", encoding="utf-8") as ftsv:
            ftsv.write(f"{new_id}\t{b64}\n")
        with open(self.dict_path, "w", encoding="utf-8") as fdict:
            json.dump(self.imgid2path, fdict, ensure_ascii=False)
        # TODO: update the LMDB database as well.
        return new_id

    def get_image_path(self, img_id: str) -> Optional[str]:
        """Return the file path for ``img_id``, or None if unknown."""
        return self.imgid2path.get(str(img_id))

    def _build_lmdb(self):
        """Serialize eval_imgs.tsv into an LMDB store, mirroring the logic of
        Chinese-CLIP's official build_lmdb_dataset.py."""
        data_dir = self.root_path
        assert os.path.isdir(
            data_dir
        ), "The data_dir does not exist! Please check the input args..."
        splits = "eval"
        specified_splits = list(set(splits.strip().split(",")))
        print("Dataset splits to be processed: {}".format(", ".join(specified_splits)))
        lmdb_dir = os.path.join(data_dir, "lmdb")
        for split in specified_splits:
            lmdb_split_dir = os.path.join(lmdb_dir, split)
            if os.path.isdir(lmdb_split_dir):
                print(
                    "We will overwrite an existing LMDB file {}".format(lmdb_split_dir)
                )
            os.makedirs(lmdb_split_dir, exist_ok=True)
            lmdb_img = os.path.join(lmdb_split_dir, "imgs")
            env_img = lmdb.open(lmdb_img, map_size=1024**3)
            txn_img = env_img.begin(write=True)
            # The "pairs" store exists only for layout compatibility with
            # Chinese-CLIP; no text/image pairs are written for evaluation,
            # so the environment is closed right away instead of leaking an
            # open write transaction.
            lmdb_pairs = os.path.join(lmdb_split_dir, "pairs")
            env_pairs = lmdb.open(lmdb_pairs, map_size=1024**3)
            env_pairs.close()
            write_idx = 0
            with open(self.tsv_path, "r", encoding="utf-8") as fin_imgs:
                for line in tqdm(fin_imgs):
                    image_id, b64 = line.strip().split("\t")
                    txn_img.put(
                        key=image_id.encode("utf-8"), value=b64.encode("utf-8")
                    )
                    write_idx += 1
                    if write_idx % 1000 == 0:
                        # Commit periodically to bound transaction size.
                        txn_img.commit()
                        txn_img = env_img.begin(write=True)
            txn_img.put(key=b"num_images", value=f"{write_idx}".encode("utf-8"))
            txn_img.commit()
            env_img.close()
            print(
                f"Finished serializing {write_idx} {split} split images into {lmdb_img}."
            )
        print("done!")

    def _tsv_and_hash_exist_and_match(self):
        """Return True iff both tsv.hash and the tsv exist and the stored
        hash matches the tsv's current content."""
        if not (os.path.exists(self.tsv_hash_path) and os.path.exists(self.tsv_path)):
            return False
        try:
            with open(self.tsv_hash_path, "r", encoding="utf-8") as f:
                saved_hash = f.read().strip()
            current_hash = self._calc_file_hash(self.tsv_path)
            return saved_hash == current_hash
        except Exception:
            return False

    def _calc_file_hash(self, file_path):
        """Return the md5 hex digest of the file's content, read in chunks."""
        import hashlib

        hash_md5 = hashlib.md5()
        with open(file_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def _save_tsv_hash(self):
        """Persist the current tsv content hash to tsv.hash."""
        hash_val = self._calc_file_hash(self.tsv_path)
        with open(self.tsv_hash_path, "w", encoding="utf-8") as f:
            f.write(hash_val)

    def _load_imgid2path(self):
        """Load eval_imgs.dict into ``self.imgid2path``; rebuild the db when
        the dict file is missing."""
        if os.path.exists(self.dict_path):
            with open(self.dict_path, "r", encoding="utf-8") as f:
                self.imgid2path = json.load(f)
        else:
            self._init_db()
