#encoding=utf8
from collections import defaultdict
from time import sleep
from pymediainfo import MediaInfo
import json
from os.path import realpath
from re import findall, sub, split as resplit, finditer
from urllib import request
import base64
from typing import Union
from PIL import Image, ImageDraw
from io import BytesIO
from qrcode import QRCode
"""
Image.Resampling.NEAREST：最近邻插值，速度最快，但可能产生最模糊的结果。
Image.Resampling.BOX：盒式滤波，适用于缩放缩小。
Image.Resampling.BILINEAR：双线性插值，速度和质量的平衡。
Image.Resampling.HAMMING：汉明窗口滤波。
Image.Resampling.BICUBIC：双三次插值，较高质量的缩放。
Image.Resampling.LANCZOS：Lanczos滤波，提供高质量的结果，但速度较慢。
Image.Resampling.AREA：区域平均，适用于缩放放大"""

def multi_mediaread(absolute_path: str):
    """Read and normalize media metadata for *absolute_path* via MediaInfo.

    Returns a ``defaultdict(str)`` with keys: ``file_suffix``,
    ``simple_name`` (title), ``media_artist``, ``file_prddate`` (creation
    date or None), ``read_path`` (forward-slash path), ``file_size``
    (MiB, always two decimals), ``file_bitrate`` and ``file_duration``
    (minutes) — the last two fall back to the literal ``"undefin"``.
    On any failure the dict contains only ``readError`` with the message.
    """
    info_dict = defaultdict(str)
    try:
        media_parse = MediaInfo.parse(realpath(absolute_path))
        media_data_dict = json.loads(media_parse.to_json())["tracks"][0]
        info_dict["file_suffix"] = media_data_dict["file_extension"]
        suffix = info_dict["file_suffix"].lower()
        if suffix == "flac":
            # FLAC: use the file name as title, performer tag as artist.
            first_title = media_data_dict["file_name"]
            first_artist = media_data_dict.get("performer", "")
        elif suffix == "wav":
            # WAV carries no tags: guess "artist - title" from the file name.
            first_title = media_data_dict["file_name"]
            dash_parts = resplit(r"\s*-\s*", first_title)
            if dash_parts:
                words = resplit(r"\W", dash_parts[0])
                first_artist = words[0] if words else dash_parts[0]
            else:
                first_artist = ""
        else:
            # Other containers: prefer explicit tags, fall back to file name.
            if "title" in media_data_dict:
                first_title = media_data_dict["title"]
            elif "track_name" in media_data_dict:
                first_title = media_data_dict["track_name"]
            else:
                first_title = media_data_dict["file_name"]
            if "performer" in media_data_dict:
                first_artist = media_data_dict["performer"]
            elif "album_performer" in media_data_dict:
                first_artist = media_data_dict["album_performer"]
            elif "file_name" in media_data_dict:
                first_artist = resplit(r"\W+", media_data_dict["file_name"])[0]
            else:
                first_artist = ""
        info_dict["simple_name"] = first_title
        info_dict["media_artist"] = first_artist
        # e.g. "2023-01-05." style prefixes in file_creation_date.
        match_prddate = findall(r"(\d+(?:\W+\d+)+)\..*",
                                media_data_dict["file_creation_date"])
        info_dict["file_prddate"] = match_prddate[0] if match_prddate else None
        info_dict["read_path"] = sub(r"\\", "/", realpath(absolute_path))
        # Size in MiB with a fixed two decimal places (the original padding
        # check `find(".") + 1 < 2` could never fire for values like "1.5").
        info_dict["file_size"] = f"{float(media_data_dict['file_size']) / (1024 * 1024):.2f}"
        if "other_overall_bit_rate" in media_data_dict:
            info_dict["file_bitrate"] = sub(r"\s+", "", media_data_dict["other_overall_bit_rate"][0])
        else:
            info_dict["file_bitrate"] = "undefin"
        if "duration" in media_data_dict:
            # duration is reported in milliseconds; expose minutes.
            info_dict["file_duration"] = str(round(media_data_dict["duration"] / 1000 / 60, 2))
        else:
            info_dict["file_duration"] = "undefin"
    except Exception as error:  # was BaseException — too broad
        info_dict.clear()
        info_dict["readError"] = str(error)
    return info_dict

def verify_requests(url: str, _headers: Union[dict, defaultdict]):
    """Probe *url* with *_headers* and report whether the response carries
    a ``Content-Length`` header.

    Returns True when the header is present and non-empty, False otherwise.
    Network errors propagate to the caller.

    Bug fixed: the original called ``.get()`` on the *body* bytes returned
    by ``read()`` (bytes has no ``.get``, so it always raised
    AttributeError); the check belongs on the response headers.  The
    connection is now also closed deterministically.
    """
    probe = request.Request(url=url, headers=_headers)
    with request.urlopen(probe) as response:
        # Message.get() is case-insensitive for header names.
        return bool(response.headers.get("Content-Length"))

def pixmapCrop(pixmap_byte: Union[bytes],
               expected_size: Union[tuple, list[int, int]],
               expected_rect: Union[list, tuple[int, int, int, int]] = False) -> bytes:
    """Crop a base64-encoded image and apply a circular alpha mask.

    pixmap_byte: base64-encoded source image.
    expected_size: display size; only the height is used, to scale
        *expected_rect* from display coordinates back to source pixels.
    expected_rect: optional (x, y, w, h) rectangle in display coordinates;
        when absent, a centered square is cropped instead.

    Returns the masked image base64-encoded as PNG; on any failure the
    exception text is returned UTF-8 encoded (legacy contract kept).
    """
    try:
        source = Image.open(BytesIO(base64.b64decode(pixmap_byte)))
        width, height = source.width, source.height
        scale = height / expected_size[1]
        if isinstance(expected_rect, (tuple, list)):
            x = round(expected_rect[0] * scale)
            y = round(expected_rect[1] * scale)
            w = round(expected_rect[2] * scale)
            h = round(expected_rect[3] * scale)
            source = source.crop((x, y, x + w, y + h))
        else:
            # Centered square crop.  The original used (x, y, width, height)
            # as the box, which is NOT square whenever width != height and
            # made the "circle" mask an off-center ellipse.
            side = min(width, height)
            x = (width - side) // 2
            y = (height - side) // 2
            source = source.crop((x, y, x + side, y + side))
        size = (source.width, source.height)
        canvas = Image.new("RGBA", size)
        mask = Image.new("RGBA", size)
        ImageDraw.Draw(mask).ellipse((0, 0, size[0], size[1]),
                                     fill=(255, 255, 255, 255))
        # Dropped the original mask.resize(valid_size, LANCZOS | BICUBIC):
        # resizing to the mask's own size is a no-op, and bitwise-OR of
        # Resampling enum values is meaningless.
        canvas.paste(source, (0, 0), mask)
        buffer = BytesIO()
        # quality/subsampling are JPEG options and were ignored for PNG.
        canvas.save(buffer, "png")
        return base64.b64encode(buffer.getvalue())
    except Exception as error:
        return str(error).encode("utf-8")


def pixmapresize(pixmap_byte: Union[bytes],
                 expected_size: Union[tuple, list[int, int]]) -> bytes:
    """Resize a base64-encoded image to *expected_size*.

    Decodes the payload, resizes with Hamming resampling, and returns the
    result base64-encoded as an optimized PNG.
    """
    decoded = base64.b64decode(pixmap_byte)
    image = Image.open(BytesIO(decoded))
    resized = image.resize(expected_size, Image.Resampling.HAMMING)
    output = BytesIO()
    resized.save(output, "png", quality=100, subsampling=0, optimize=True)
    return base64.b64encode(output.getvalue())

def qrcode_make(_qr_data: str,
                _save_path: Union[str, None] = None,
                _fill_color: str = "IndianRed",
                _back_color: str = "rgb(245,245,245)",
                _box_border: int = 2,
                _box_size: int = 5) -> str:
    """Render *_qr_data* as a QR code image.

    Returns the PNG as a base64 ASCII string; when *_save_path* is given
    the image is also written to disk.  On failure the exception message
    is returned instead (legacy contract kept for existing callers).
    """
    try:
        qr = QRCode(box_size=_box_size, border=_box_border)
        qr.add_data(_qr_data)
        image = qr.make_image(back_color=_back_color, fill_color=_fill_color)
        buffer = BytesIO()
        image.save(buffer, "png")
        encoded = str(base64.b64encode(buffer.getvalue()), "ascii")
        if _save_path:
            image.save(_save_path)
        return encoded
    # Was `except BaseException as qrcodeeError` — BaseException also
    # swallows KeyboardInterrupt/SystemExit, and the name was typo'd.
    except Exception as qrcode_error:
        return str(qrcode_error)

def unic_utf_res(source_data: Union[str]):
    """Convert between literal ``\\uXXXX`` escapes and CJK characters.

    Every literal ``\\uXXXX`` sequence in *source_data* is decoded to its
    real character, while every bare CJK character (U+4E00..U+9FFF) is
    escaped back to its literal ``\\uXXXX`` form.  Everything else passes
    through unchanged.
    """
    section_datac = []
    # Exclusive index up to which characters were already consumed as part
    # of a decoded escape; 0 means nothing pending.
    globcg_num = 0
    # Start positions of every literal "\u" in the input.
    find_iter = [i.start() for i in finditer("\\\\u", source_data)]
    for index, single_place in enumerate(source_data):
        if index in find_iter:
            # Grow the candidate slice ("\u" + digits) until it decodes.
            recycle_count = 3
            while True:
                unicode_char = source_data[index:index + recycle_count]
                base_find_iter = [i.start() for i in finditer("\\\\u", unicode_char)]
                if base_find_iter:
                    if len(base_find_iter) > 1:
                        # A second "\u" starts before this one decoded:
                        # keep the backslash literally and move on.
                        section_datac.append(single_place)
                        break
                    else:
                        real_index = recycle_count + index
                        try:
                            anly_unicode = unicode_char.encode("utf8").decode("unicode_escape")
                            section_datac.append(anly_unicode)
                            globcg_num = real_index
                            break
                        # Was a bare `except:`; UnicodeError covers the
                        # truncated-escape decode failure (and a possible
                        # encode failure on lone surrogates).
                        except UnicodeError:
                            if real_index > len(source_data):
                                # Ran off the end without decoding.
                                section_datac.append(single_place)
                                break
                            else:
                                recycle_count += 1
                else:
                    break
        else:
            if globcg_num:
                if index < globcg_num:
                    # Skip characters already consumed by a decoded escape;
                    # reset the marker on the last skipped character.
                    if index + 1 == globcg_num:
                        globcg_num = 0
                    continue
            else:
                if findall("[\u4e00-\u9fff]", single_place):
                    # CJK character -> literal "\uXXXX" form.
                    utf8_char = f"{single_place.encode('unicode_escape').decode('utf8')}"
                    section_datac.append(utf8_char)
                else:
                    section_datac.append(single_place)
    return "".join(section_datac)
