# -*- coding: utf-8 -*-
# standard
import os
import sys
import decimal
from hashlib import md5

# third

# local
_P_PATH =  os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if _P_PATH not in sys.path:
    sys.path.append(_P_PATH)
from models import *


"""
@Title:   
@File: 导入原始数据集.py
@Author: walle 2023年12月29日 11时51分56秒
@Version: 1.0.0
@Desc: 
"""


def parse_txt(txt_path: str, fps: int = 25) -> dict:
    """
    Parse a transcript text file and extract the clip's frame/time range.

    The file is scanned for a line whose first word is "frame" (case
    insensitive); the following lines are expected to start with a 6-digit
    frame number.  The first such number is the start frame and the last
    consecutive one is the end frame.

    :param txt_path: path of the transcript text file to parse
    :param fps: frame rate used to convert frame numbers to seconds
                (defaults to 25, the dataset's frame rate)
    :return: dict with "start_time"/"end_time" (seconds as strings with 3
             decimals) and "start_frame"/"end_frame" (raw 6-digit strings)
    :raises ValueError: if no frame range can be found in the file
    """
    frame_re = re.compile(r"\d{6}")  # hoisted out of the line loop
    begin = None
    end = None
    with open(txt_path, "r", encoding="utf-8") as lines:
        in_frame_section = False
        for line in lines:
            line = line.strip()
            if not line:
                continue
            words = line.split(" ")

            if in_frame_section:
                match = frame_re.match(words[0])
                if match:
                    if begin is None:
                        begin = match.group()
                    # always track the last matching line, so a single
                    # frame line yields begin == end instead of crashing
                    end = match.group()
                elif begin is not None:
                    break  # frame list ended
            if words[0].lower() == "frame":
                in_frame_section = True
                continue
    if begin is None or end is None:
        # was: int(None) TypeError / AttributeError on .group() of a failed match
        raise ValueError(f"no frame range found in {txt_path}")
    # frame number -> seconds at the given frame rate
    b = int(begin) / fps
    e = int(end) / fps
    b_str = decimal.Decimal(str(b)).quantize(decimal.Decimal("0.000")).to_eng_string()
    e_str = decimal.Decimal(str(e)).quantize(decimal.Decimal("0.000")).to_eng_string()
    return {"start_time": b_str, "end_time": e_str, "start_frame": begin, "end_frame": end}


class Clip(BaseModel):
    """A single video clip; all boundaries are stored as strings."""

    file_name: str = Field(...)     # source transcript file name
    start_frame: str = Field(...)   # 6-digit start frame, e.g. "000010"
    end_frame: str = Field(...)     # 6-digit end frame
    start_time: str = Field(...)    # start time in seconds, e.g. "0.400"
    end_time: str = Field(...)      # end time in seconds

    def contain(self, other: "Clip") -> bool:
        """Return True if *other* lies entirely within this clip's time range."""
        # Times are stored as strings, so compare them numerically.
        return (float(other.start_time) >= float(self.start_time)
                and float(other.end_time) <= float(self.end_time))
        
    
        

class ClipContainer(BaseModel):
    # Ordered list of clips; add_clip keeps it sorted by start time.
    clips: list[Clip] = Field([])

    def add_clip(self, key: str, new_clip: Clip):
        """
        Insert *new_clip* into ``self.clips`` while resolving containment
        and overlap against the existing clips.

        Assumes ``self.clips`` is already sorted by start time (callers
        sort before feeding clips in) — a ValueError is raised when that
        invariant is violated or an unhandled containment shows up.

        :param key: video key, only used in warning messages
        :param new_clip: clip to insert
        :return: the (possibly updated) clip list
        """
        last_matched_clip_index = None
        flag = 0  # 0 = insert, 1 = replace, -1 = do nothing
        for i, clip in enumerate(self.clips):
            if clip.contain(new_clip):  # new clip fully contained in an existing one: drop it
                flag = -1
                break
            elif new_clip.contain(clip):  # new clip fully contains an existing one: replace that one
                last_matched_clip_index = i
                flag = 1
                break
            elif float(clip.start_time) <= float(new_clip.start_time):  # new clip starts no earlier than the current one: normal case
                if float(new_clip.end_time) > float(clip.end_time): # new clip also ends later: fine, but the two may still cross
                    if float(new_clip.start_time) >= float(clip.end_time): # new clip starts at/after the current clip's end: no overlap
                        last_matched_clip_index = i
                        # keep comparing with the next clip until the list ends or a condition fails
                    else: # new clip starts before the current clip ends: they overlap
                        if (delta := (int(clip.end_frame) - int(new_clip.start_frame))) > 10:  # overlap of more than 10 frames: still kept, but warn
                            logger.warning(f"警告! {key} 新clip({new_clip.file_name})的起止时间({new_clip.start_time} - {new_clip.end_time} 与clips中的第{i}个clip({clip.file_name})的起止时间({clip.start_time} - {clip.end_time}) 交叉了 {clip.end_frame} - {new_clip.start_frame} = {delta} 帧！")
                            last_matched_clip_index = i
                            break
                        else:  # small overlap, already handled — nothing more to do
                            last_matched_clip_index = i
                            if (i + 1) != len(self.clips):
                                raise ValueError("好像不是最后一个元素")
                            break
                else: # this is a containment, which was handled above; reaching here is a bug
                    raise ValueError(f"新clip({new_clip.start_time}-{new_clip.end_time})包含于之前的clip({clip.start_time} - {clip.end_time}) 之中，但未被正确的处理")
            else:  # new clip starts earlier than the current one; input was sorted by start time, so this is an error
                raise ValueError(f"新clip的开始时间 {new_clip.start_time} 比clips中的第{i}个clip的开始时间({clip.start_time}) 早，停止比对")
        if flag == 0:
            # insert after the last matching position, or append when nothing matched
            self.clips.insert(last_matched_clip_index + 1, new_clip) if last_matched_clip_index is not None else self.clips.append(new_clip)
        elif flag == 1:
            self.clips[last_matched_clip_index] = new_clip
        return self.clips


def process_clips(key: str, clips: list[dict]) -> list[dict]:
    """
    Normalise a list of clip dicts for one video.

    * clips overlapping in time are checked against each other
    * when one clip is a subset of another, only the superset is kept

    :param key: video key, used for log/warning messages
    :param clips: raw clip dicts, pre-sorted by start time
    :return: cleaned clip dicts
    """
    container = ClipContainer()
    for raw_clip in clips:
        container.add_clip(key, Clip(**raw_clip))
    return [kept.dict() for kept in container.clips]


def parse_folder():
    """
    Parse the dataset directory into the media/clip json structures.

    Pipeline:
    1. Load raw data from ``test/raw_source.json`` when present; otherwise
       walk the dataset directory, parse every transcript file and cache
       the result into that json for the next run.
    2. Merge the per-category raw data into one dict keyed by video key,
       ignoring clips under the "trainval" category.
    3. Keep only videos recorded in the DB (quality != 0), sort each
       video's clips, resolve containment/overlap via process_clips, and
       write the result to ``test/sorted_medias.json``.

    :return: the merged per-key data of step 2 (before DB filtering)
    """
    raw_json = os.path.join(_P_PATH, "test", "raw_source.json")
    if os.path.exists(raw_json):  # raw data already generated: just load it
        with open(raw_json, "r", encoding="utf-8") as f:
            data = json.load(f)
    else:  # build the raw data from the original dataset directory
        dir_path = "C:\\Users\\Mia\\Downloads\\lrs3_v0.4"
        data = {}  # raw data parsed from the folder
        logger.info("开始解析目录")
        for category in tqdm(os.listdir(dir_path)):
            category_path = os.path.join(dir_path, category)
            container = {}
            for key in tqdm(os.listdir(category_path), desc=category):
                clips = []
                key_path = os.path.join(category_path, key)
                for _file_name in os.listdir(key_path):
                    file_path = os.path.join(key_path, _file_name)
                    clip = parse_txt(file_path)
                    clip['file_name'] = _file_name
                    clips.append(clip)
                clips.sort(key=lambda x: (float(x["start_time"]), float(x["end_time"])))
                container[key] = {"clips": clips}
            data[category] = container
        logger.info("解析目录完成")
        # Cache the raw data so the next run can skip the directory walk.
        # BUG FIX: this previously dumped ``new_data``, which is only
        # defined later in the function, so the cache write always raised
        # NameError; dump the freshly built ``data`` instead.
        with open(raw_json, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=4)
    # Merge the categories into one dict keyed by video key.
    new_data = {}  # merged data covering all videos
    for _category, value in data.items():
        if _category == "trainval":  # clips under "trainval" are ignored
            continue
        for key, clip_dict in value.items():
            clip_container = new_data.get(key, {})
            if len(clip_container) == 0:
                clip_container = clip_dict
            else:
                clip_container["clips"].extend(clip_dict["clips"])
            new_data[key] = clip_container

    db_session = new_db_session()
    # key -> quality for every media file that was actually downloaded
    a_map = {x.key: x.quality for x in db_session.query(MediaFileOrm.key, MediaFileOrm.quality).filter(MediaFileOrm.quality != 0)}
    db_session.close()
    # Re-sort clips and drop videos that were never downloaded.
    clear_data = []  # remaining data after removing undownloaded videos
    for key, clip_dict in new_data.items():
        if key not in a_map:
            # video not downloaded, skip it
            continue
        # Sorting is a precondition of process_clips: its overlap logic
        # only holds for clips ordered by start time.
        clip_dict["clips"].sort(key=lambda x: (float(x["start_time"]), float(x["end_time"])))
        media_entry = {
            "quality": a_map[key],
            "key": key,
            "page_url": f"https://www.youtube.com/watch?v={key}",
        }
        clips = process_clips(key, clip_dict["clips"])
        media_entry["clips"] = [{"start_time": x['start_time'], "end_time": x['end_time']} for x in clips]
        clear_data.append(media_entry)
    logger.info("重新排序clips完成")
    logger.info("整理成需要的格式完成")
    # Target json format:
    # {
    #     "4bsiMd1zQ3s": {"quality": 1080, "clips": [...]},
    #     "4bsiMd1z23s": {"quality": 1080, "clips": [...]},
    #     ...
    # }
    clear_data.sort(key=lambda x: (-x["quality"], x["key"]))  # minus sign: quality descending
    json_data = {}
    for entry in clear_data:
        json_data[entry.pop("key")] = entry
    logger.info("开始写入json文件")
    json_path = os.path.join(_P_PATH, "test", "sorted_medias.json")
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(json_data, f, indent=4)
    return new_data
            



def main():
    """Parse the dataset folder and (re)write media/clip rows to the database."""
    data = parse_folder()
    logger.info("开始写入数据库")
    db_session = new_db_session()
    # Use a set for O(1) membership tests (was a list: O(n) per lookup).
    old_keys = {x.key for x in db_session.query(MediaFileOrm.key)}
    db_session.query(ClipOrm).delete()  # clear old clip rows so they can be rewritten
    for key, media_info in tqdm(data.items(), colour="red"):
        page_url = f"https://www.youtube.com/watch?v={key}"
        if key not in old_keys:
            # media row does not exist yet: create it
            media = MediaFileOrm(key=key, page_url=page_url)
            db_session.add(media)
        for clip_info in media_info['clips']:
            clip = ClipOrm(media_key=key,
                           start_time=clip_info['start_time'],
                           end_time=clip_info['end_time'])
            db_session.add(clip)
    db_session.commit()
    db_session.close()
    logger.info("写入数据库完成")


if __name__ == '__main__':
    # Script entry point: parse the dataset and sync it into the database.
    main()
    pass

