import asyncio
import copy
import datetime
import json
import traceback

from apps.database import get_ods, save_ods_result, save_ods, get_ods_result_by_id, save_dwd, \
    update_ods
from apps.models.BaseModel import OdsResultModel, UpdateDwdModel
from apps.models.QueryModel import QuerySchema, QueryDetail
from apps.models.ResponseModel import err_info
from apps.task.parse_works import ParseWorks
from apps.utils.MinioBoto3 import S3Util


async def run():
    """Parse the hard-coded sample file and return the parse result.

    Returns:
        The result produced by ``parse_task`` (printed as JSON first).

    Raises:
        Exception: when parsing fails; the original error is chained.
    """
    # mongo.init(configs.MONGO_URI)
    # async for item in get_task():
    item = [
        r"C:\Users\Administrator\Documents\WXWork\1688853051339318\Cache\File\2024-03\20240301（含C0）\Ａ14451%明光宗貞皇帝實錄.txt"]
    try:
        bools, results = await parse_task(item)
        # print(bools)
        print(json.dumps(results, ensure_ascii=False))
        return results
    except Exception as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; chain the cause instead of discarding it.
        traceback.print_exc()
        raise Exception("save_ods 保存状态错误") from exc


async def get_task():
    """Yield pending ODS records (source_type == 4 and status == 0), up to 100."""
    conditions = [
        QueryDetail(field_name='source_type', field_value=4, link='and', oparation='eq'),
        QueryDetail(field_name='status', field_value=0, link='and', oparation='eq'),
    ]
    schema = QuerySchema(query=conditions, limit=100)
    records = await get_ods(
        schema.get_filter(),
        schema.get_projection(),
        schema.get_sort_by(),
        schema.skip,
        schema.limit,
    )
    for record in records:
        yield record


async def read_file(file_list):
    """Read and concatenate the given UTF-8 text files in sorted name order.

    Args:
        file_list: paths of text files to merge.

    Returns:
        A tuple ``(merged_content, per_file_counts, True)`` where
        ``per_file_counts`` is a list of ``(line_count, file_name)`` tuples;
        ``line_count`` excludes the empty fragment after a trailing newline.

    Raises:
        Exception: when (for multiple files) the sum of per-file line counts
            does not match the merged content's line count.
    """
    parts = []
    list_count = []
    for file_name in sorted(file_list):
        with open(file_name, 'r', encoding='utf-8') as fh:
            file_content = fh.read()
        parts.append(file_content)
        # -1: the "" fragment after the final newline is not a real line.
        list_count.append((len(file_content.split("\n")) - 1, file_name))

    # Join once instead of quadratic += concatenation inside the loop.
    all_file_contents = "".join(parts)
    all_count = sum(count for count, _ in list_count)
    # +1: the merged text still ends with one empty fragment after the last newline.
    if len(file_list) > 1 and all_count + 1 != len(all_file_contents.split('\n')):
        raise Exception("合并文件长度问题")
    return all_file_contents, list_count, True


async def parse_task(item):
    """Merge the files in *item*, run ParseWorks over them, and return the result.

    Args:
        item: list of file paths to merge and parse.

    Returns:
        ``(True, parse_result)`` on success, or ``(False, error_dict)`` where
        the dict carries the error message, the position reached, and the
        traceback.
    """
    pw = ParseWorks()
    all_file_contents, list_count, bools = await read_file(item)
    if not bools:
        return False, all_file_contents
    pw.set_inputs(all_file_contents)
    pw.set_file_info(list_count)
    try:
        # NOTE(review): hard-coded debug record shadowing the `item` parameter —
        # presumably temporary scaffolding; confirm before production use.
        item = {
            "_id": "10011704696247363197+1",
            "lngid": "10011704696247363197+1",
            "rawid": "2024010800022+1",
            "process_id": "1704696247362759+1",
            "source_type": 4,
            "type_id": 2,
            "file_name": [
                "Ａ14301%憨山大師夢遊全集-001.txt",

            ],
            "latest_date": "20240109202100805615",
            "upload_time": datetime.datetime.now(),
            "status": 0,
            "error_info": "2024-01-09 20:21:00-通过质检"
        }
        pw.data_parse(item)
        result = pw.get_parse_result()
        return True, result
    except Exception as e:
        from apps.task.works_one_parse import WorksOneParse
        dicts = err_info()
        dicts["err_msg"] = repr(e)
        dicts["处理到行"] = str(pw.wop.now_deal_index_line)
        dicts["已处理长度"] = str(pw._finish_deal_len)
        if pw.wop.now_deal_index_line:
            # Get the per-author internal index number.
            if index_line := pw.wop.now_deal_index_line[-1]:
                # Compute the absolute line number.
                file_in_count = index_line[0] + pw._finish_deal_len + 1  # +1: lines and indices differ by 1
                # Map the absolute line number back to a file position.
                index, filename = pw.get_count_in_file(file_in_count)
                # Bug fix: `filename` was computed but never interpolated into the message.
                dicts["处理位置"] = f"处理到行:{index}, 在文件{filename}"
        dicts["traceback"] = traceback.format_exc()
        pw.wop.now_deal_index_line.clear()
        return False, dicts


async def run_dwd():
    """Build DWD records from the parsed ODS result.

    For every Ｃ1 chapter group, construct an ``UpdateDwdModel`` whose
    ``title_info`` starts with the Ｃ1 chapter followed by the remaining
    chapters of the group, and print it as JSON.

    Raises:
        Exception: when a group contains no Ｃ1 chapter.
    """
    cclp_ods = await run()
    print(cclp_ods)
    # Index title_info entries by chapter_id for O(1) lookup.
    id_title_info_dicts = {
        t["chapter_id"]: t for t in cclp_ods["title_info"]
    }
    for group in cclp_ods["title_info_c1group"]:
        # Find the Ｃ1 (group head) chapter inside this group.
        c1_title_info = None
        for chapter_id in group:
            info = id_title_info_dicts[chapter_id]
            if info["chapter_raw_type"] == 'Ｃ1':
                c1_title_info = info
        if c1_title_info is None:
            # Previously crashed with an opaque TypeError on subscripting
            # deepcopy(None); fail fast with a clear message instead.
            raise Exception(f"Ｃ1 chapter missing in group: {group}")
        temp_c1_title_info = copy.deepcopy(c1_title_info)
        temp_c1_title_info["work_id"] = cclp_ods["rawid"]
        dwd_date = UpdateDwdModel(
            m_id=temp_c1_title_info["chapter_id"] + "_" + cclp_ods["latest_date"],
            ods_id=cclp_ods["_id"],
            user_id=0,
            keyid=temp_c1_title_info["chapter_id"],
            rawid=cclp_ods["rawid"],
            process_id=cclp_ods["process_id"],
            source_type=cclp_ods["source_type"],
            type_id=cclp_ods["type_id"],
            is_deprecated=0,
            latest_date=cclp_ods["latest_date"],
            update_date=datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"),
            title_info=[temp_c1_title_info]
        )
        # Append the non-Ｃ1 chapters of the group after the Ｃ1 head.
        for chapter_id in group:
            info = id_title_info_dicts[chapter_id]
            if info["chapter_raw_type"] != 'Ｃ1':
                temp_cn_title_info = copy.deepcopy(info)
                temp_cn_title_info["work_id"] = cclp_ods["rawid"]
                dwd_date.title_info.append(temp_cn_title_info)

        dicts = dwd_date.get_dict()
        print(json.dumps(dicts, ensure_ascii=False))


async def run_task():
    """Entry point: execute the full ODS -> DWD pipeline."""
    # run_dwd() already invokes run() internally, so only one call is needed.
    await run_dwd()


if __name__ == "__main__":
    # Script entry point: drive the async pipeline to completion.
    asyncio.run(run_task())
