import json
import copy
import logging
import re
import uuid
from datetime import datetime
from functools import partial
from io import BytesIO

import numpy as np
import requests
import xxhash

from api import settings
from api.db import LLMType
from api.db.db_models import close_connection
from api.db.services.dialog_service import keyword_extraction, question_proposal
from api.db.services.document_service import DocumentService
from api.db.services.file2document_service import File2DocumentService
from api.db.services.image_service import ImageService
from api.db.services.llm_service import LLMBundle
from api.db.services.task_service import TaskService
from constant.constants import ParserType
from graphrag.utils import get_llm_cache, set_llm_cache
from rag.app import naive, manual, table, book, email, audio, one, qa, picture
from timeit import default_timer as timer

from rag.app import presentation, paper
from rag.nlp import search, rag_tokenizer

from rag.settings import DOC_MAXIMUM_SIZE
from rag.utils.storage_factory import STORAGE_IMPL
from peewee import DoesNotExist

# Maps a task's parser_id (document layout type) to the rag.app module that
# implements its chunking strategy; "general" is an alias for the naive parser.
FACTORY = {
    "general": naive,
    ParserType.NAIVE.value: naive,
    ParserType.PAPER.value: paper,
    ParserType.BOOK.value: book,
    ParserType.PRESENTATION.value: presentation,
    ParserType.MANUAL.value: manual,
    ParserType.QA.value: qa,
    ParserType.TABLE.value: table,
    ParserType.ONE.value: one,
    ParserType.AUDIO.value: audio,
    ParserType.EMAIL.value: email,
    ParserType.PICTURE.value: picture,
}


def do_handle_task(task):
    """Run the full ingestion pipeline for one task row.

    Steps: check for cancellation, bind the embedding model, chunk the
    document, generate QA pairs into the QA index, embed the chunks, and
    bulk-insert them into the doc store while reporting progress.

    Args:
        task: dict-like task row (ids, page range, model names, parser config).

    Raises:
        Exception: re-raised on embedding-model binding, embedding, or
            doc-store insertion failures (after reporting via the callback).
    """
    task_id = task["id"]
    task_from_page = task["from_page"]
    task_to_page = task["to_page"]
    task_tenant_id = task["tenant_id"]
    task_embedding_id = task["embd_id"]
    task_language = task["language"]
    task_llm_id = task["llm_id"]
    task_dataset_id = task["kb_id"]
    task_doc_id = task["doc_id"]
    task_document_name = task["name"]
    task_parser_config = task["parser_config"]

    # Progress reporting pre-bound to this task's id and page range.
    progress_callback = partial(set_progress, task_id, task_from_page, task_to_page)

    try:
        task_canceled = TaskService.do_cancel(task_id)
    except DoesNotExist:
        logging.warning(f"task {task_id} is unknown")
        return
    if task_canceled:
        return

    try:
        # Bind the tenant's embedding model.
        embedding_model = LLMBundle(task_tenant_id, LLMType.EMBEDDING, llm_name=task_embedding_id, lang=task_language)
    except Exception as e:
        error_message = f'Fail to bind embedding model: {str(e)}'
        logging.exception(error_message)
        raise

    # Standard chunking methods
    start_ts = timer()
    chunks = build_chunks(task, progress_callback)
    logging.debug("chunks after building: %s", chunks)
    logging.info("Build document {}: {:.2f}s".format(task_document_name, timer() - start_ts))
    if chunks is None:
        return
    if not chunks:
        progress_callback(1., msg=f"No chunk built from {task_document_name}")
        logging.info(f"No chunk built from {task_document_name}")
        return
    # TODO: exception handler
    logging.info("Generate {} chunks".format(len(chunks)))
    progress_callback(msg="Generate {} chunks".format(len(chunks)))
    start_ts = timer()

    # Extract QA pairs from the chunks and store them in the QA vector index.
    logging.info("start generate qa...")
    gen_qa(task, chunks, embedding_model, progress_callback)

    try:
        # Vectorize the chunks in place.
        token_count, vector_size = embedding(chunks, embedding_model, task_parser_config, progress_callback)
    except Exception as e:
        error_message = "Generate embedding error:{}".format(str(e))
        progress_callback(-1, error_message)
        logging.exception(error_message)
        raise

    progress_message = "Embedding chunks ({:.2f}s)".format(timer() - start_ts)
    logging.info(progress_message)
    progress_callback(msg=progress_message)

    # Persist the chunk records into the doc store in small batches.
    init_kb(task, vector_size)
    chunk_count = len({chunk["id"] for chunk in chunks})
    start_ts = timer()
    es_bulk_size = 4
    for b in range(0, len(chunks), es_bulk_size):
        doc_store_result = settings.docStoreConn.insert(chunks[b:b + es_bulk_size],
                                                        search.index_name(task_tenant_id), task_dataset_id)
        if b % 128 == 0:
            progress_callback(prog=0.8 + 0.1 * (b + 1) / len(chunks), msg="")
        if doc_store_result:
            # A truthy insert() result is an error description from the store.
            error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
            progress_callback(-1, msg=error_message)
            raise Exception(error_message)
        chunk_ids = [chunk["id"] for chunk in chunks[:b + es_bulk_size]]
        chunk_ids_str = " ".join(chunk_ids)
        try:
            TaskService.update_chunk_ids(task["id"], chunk_ids_str)
        except DoesNotExist:
            # Task row vanished mid-run: roll back the chunks inserted so far.
            logging.warning(f"do_handle_task update_chunk_ids failed since task {task['id']} is unknown.")
            doc_store_result = settings.docStoreConn.delete({"id": chunk_ids}, search.index_name(task_tenant_id),
                                                            task_dataset_id)
            return
    logging.info("Indexing doc({}), page({}-{}), chunks({}), elapsed: {:.2f}".format(task_document_name, task_from_page,
                                                                                     task_to_page, len(chunks),
                                                                                     timer() - start_ts))

    DocumentService.increment_chunk_num(task_doc_id, task_dataset_id, token_count, chunk_count, 0)

    # NOTE: start_ts was reset above, so this measures the indexing phase only.
    time_cost = timer() - start_ts
    progress_callback(prog=1.0, msg="Done ({:.2f}s)".format(time_cost))
    logging.info("Chunk doc({}), page({}-{}), chunks({}), token({}), elapsed:{:.2f}".format(task_document_name,
                                                                                            task_from_page,
                                                                                            task_to_page,
                                                                                            len(chunks),
                                                                                            token_count, time_cost))


def gen_qa(task, chunks, embedding_model, progress_callback):
    """Generate QA pairs for each chunk via the external QA service, embed
    the questions and insert the records into the tenant's QA index.

    Best-effort: any failure is logged and swallowed so the main ingestion
    pipeline continues without QA records.

    Args:
        task: task row dict (uses "kb_id", "doc_id", "tenant_id").
        chunks: chunk records; "content_with_weight" is sent as the prompt.
        embedding_model: model bundle with an encode(texts) method.
        progress_callback: set_progress partial for error reporting.
    """
    # TODO: move the service endpoint into configuration instead of hard-coding it.
    qa_url = "http://110.122.0.36:5000/qa_gen"
    headers = {"Content-Type": "application/json"}
    payload = {
        "temperature": 0.1,
        "top_p": 0.7,
        "max_token": 5120,
        "stream": False
    }
    payload["prompts"] = [{"role": "user", "content": chunk["content_with_weight"]}
                          for chunk in chunks]

    start_ts = timer()
    try:
        # Explicit timeout so a hung QA service cannot stall the worker forever.
        response = requests.post(qa_url, data=json.dumps(payload), headers=headers, timeout=600)
        if response.status_code == 200:
            data = response.json()
            qas_list = data.get('QAs', [])
            # Loop variable is `item`, not `qa`, to avoid shadowing the
            # imported rag.app.qa module.
            for item in qas_list:
                logging.debug("Question: %s, Answer: %s", item.get('Q'), item.get('A'))
        else:
            logging.error(f"call qa gen failed, error code: {response.status_code}, error info：{response.text}")
            raise Exception(response.text)

        # Create (or ensure) the QA vector index.
        qa_idx = init_qa_kb(task, 1024)

        # Keep qas_list and question_list aligned: drop malformed items missing
        # 'Q' or 'A' (embedding_qs asserts the two lists have equal length).
        qas_list = [item for item in qas_list if 'Q' in item and 'A' in item]
        question_list = [item['Q'] for item in qas_list]
        # Vectorize the question list.
        qa_vec = embedding_qs(task, qas_list, question_list, embedding_model)

        bulk_size = 4
        for idx in range(0, len(qa_vec), bulk_size):
            qa_store_res = settings.docStoreConn.insert(qa_vec[idx:idx + bulk_size], qa_idx, task["kb_id"])
            if qa_store_res:
                progress_callback(-1, msg=qa_store_res)
                raise Exception(f"insert qa error:{qa_store_res}")

        logging.info("generate qa cost: {:.2f}s".format(timer() - start_ts))
    except Exception as e:
        logging.exception("generate qa error: {}".format(str(e)))


def embedding_qs(task, qas_list, question_list, embedding_model):
    """Embed each generated question and build doc-store-ready QA records.

    Args:
        task: task row dict; "doc_id" and "kb_id" are copied into each record.
        qas_list: QA dicts; the i-th "A" becomes the i-th record's content.
        question_list: questions to embed, same length as qas_list.
        embedding_model: model bundle with encode(texts) -> (vectors, tokens).

    Returns:
        list[dict]: one record per question with a randomized unique id and
        the vector stored under "q_<dim>_vec".
    """
    batch_size = 16
    vec_batches = []
    for start in range(0, len(question_list), batch_size):
        vecs, _tokens = embedding_model.encode(question_list[start:start + batch_size])
        vec_batches.append(vecs)
    question_vecs = np.concatenate(vec_batches, axis=0) if vec_batches else np.array([])

    assert len(question_vecs) == len(question_list)
    assert len(qas_list) == len(question_list)
    records = []
    for idx, question in enumerate(question_list):
        vec = question_vecs[idx].tolist()
        # Random UUID salt keeps ids unique even for identical answers.
        record_id = xxhash.xxh64(
            (qas_list[idx]["A"] + str(uuid.uuid4())).encode("utf-8")).hexdigest()
        records.append({
            "id": record_id,
            "doc_id": task["doc_id"],
            "kb_id": task["kb_id"],
            "question_kwd": question,
            "content_with_weight": qas_list[idx]["A"],
            f"q_{len(vec)}_vec": vec,
        })
    return records


class TaskCanceledException(Exception):
    """Raised to abort processing when a task has been canceled.

    Calls super().__init__ so str(exc) carries the message (the previous
    implementation left the Exception args empty, making logs unhelpful).
    """

    def __init__(self, msg):
        super().__init__(msg)
        # Kept for backward compatibility with callers reading exc.msg.
        self.msg = msg


def get_storage_binary(bucket, name):
    """Return the raw bytes of object `name` from `bucket` via the configured storage backend."""
    return STORAGE_IMPL.get(bucket, name)


def build_chunks(task, progress_callback):
    """Fetch the task's file from storage, chunk it with the parser selected
    by task["parser_id"], persist chunk images, and optionally enrich chunks
    with auto-generated keywords and questions.

    Args:
        task: task row dict (ids, parser config, location, size, ...).
        progress_callback: set_progress partial passed to the chunker.

    Returns:
        list[dict]: doc-store-ready chunk records, or [] if the file exceeds
        the size limit.

    Raises:
        Exception: storage fetch, chunking, or image-saving failures are
            logged and re-raised; TaskCanceledException propagates untouched.
    """
    if task["size"] > DOC_MAXIMUM_SIZE:
        set_progress(task["id"], prog=-1, msg="File size exceeds( <= %dMb )" %
                                              (int(DOC_MAXIMUM_SIZE / 1024 / 1024)))
        return []
    chunker = FACTORY[task["parser_id"].lower()]
    try:
        st = timer()
        bucket, name = File2DocumentService.get_storage_address(doc_id=task["doc_id"])
        binary = get_storage_binary(bucket, name)
        logging.info("read binary from minio:({}) {}/{}".format(timer() - st, task["location"], task["name"]))
    except TimeoutError:
        logging.exception(
            "Minio {}/{} got timeout: Fetch file from minio timeout.".format(task["location"], task["name"]))
        raise
    except Exception as e:
        if re.search("(No such file|not found)", str(e)):
            logging.exception("Can not find file {} from minio. Could you try it again?".format(task["name"]))
        else:
            logging.exception("Get file from minio: {}".format(str(e).replace("'", "")))
        logging.exception("Chunking {}/{} got exception".format(task["location"], task["name"]))
        raise

    try:
        cks = chunker.chunk(task["name"], binary=binary, from_page=task["from_page"],
                            to_page=task["to_page"], lang=task["language"], callback=progress_callback,
                            kb_id=task["kb_id"], parser_config=task["parser_config"], tenant_id=task["tenant_id"],
                            llm_id=task["llm_id"])
        logging.debug("raw chunks: %s", cks)
        logging.info("Chunking({}) {}/{} done".format(timer() - st, task["location"], task["name"]))
    except TaskCanceledException:
        raise
    except Exception:
        logging.exception("Chunking {}/{} got exception".format(task["location"], task["name"]))
        raise

    docs = []
    # Fields shared by every chunk record for this document.
    doc = {
        "doc_id": task["doc_id"],
        "kb_id": str(task["kb_id"])
    }
    if task["pagerank"]:
        doc["pagerank_fea"] = int(task["pagerank"])
    el = 0
    for ck in cks:
        d = copy.deepcopy(doc)
        d.update(ck)
        # Random UUID salt keeps ids unique even for identical chunk text.
        d["id"] = xxhash.xxh64(
            (ck["content_with_weight"] + str(d["doc_id"]) + str(uuid.uuid4())).encode("utf-8")).hexdigest()
        d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
        d["create_timestamp_flt"] = datetime.now().timestamp()
        # Chunks without an image are neither uploaded to storage nor
        # recorded in the image DB.
        if not d.get("image"):
            _ = d.pop("image", None)
            d["img_id"] = ""
            docs.append(d)
            continue

        try:
            output_buffer = BytesIO()
            if isinstance(d["image"], bytes):
                output_buffer = BytesIO(d["image"])
            else:
                d["image"].save(output_buffer, format='JPEG')

            st = timer()
            STORAGE_IMPL.put(task["kb_id"], d["id"], output_buffer.getvalue())

            image = d.get("image")
            # Refresh this image's DB record: drop any stale row, then insert.
            # Raw bytes carry no mode/size/format attributes, so only
            # PIL-like images are recorded (bytes previously crashed on
            # image.mode inside create_image_dict).
            if not isinstance(image, bytes):
                ImageService.delete_by_id(d.get("id"))
                ImageService.insert(**create_image_dict(d, task, image))
                logging.debug("insert into image db success.")
            el += timer() - st
        except Exception:
            logging.exception(
                "Saving image of chunk {}/{}/{} got exception".format(task["location"], task["name"], d["id"]))
            raise
        d["img_id"] = "{}-{}".format(task["kb_id"], d["id"])
        del d["image"]
        docs.append(d)
    logging.info("MINIO PUT:({}):{}".format(task["name"], el))

    # Keyword extraction per chunk (LLM results cached per content/model).
    if task["parser_config"].get("auto_keywords", 0):
        st = timer()
        progress_callback(msg="Start to generate keywords for every chunk ...")
        chat_mdl = LLMBundle(task["tenant_id"], LLMType.CHAT, llm_name=task["llm_id"], lang=task["language"])
        for d in docs:
            cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "keywords",
                                   {"topn": task["parser_config"]["auto_keywords"]})
            if not cached:
                cached = keyword_extraction(chat_mdl, d["content_with_weight"],
                                            task["parser_config"]["auto_keywords"])
                if cached:
                    set_llm_cache(chat_mdl.llm_name, d["content_with_weight"], cached, "keywords",
                                  {"topn": task["parser_config"]["auto_keywords"]})

            # NOTE(review): assumes keyword_extraction never returns None on
            # cache miss — confirm, otherwise .split would raise here.
            d["important_kwd"] = cached.split(",")
            d["important_tks"] = rag_tokenizer.tokenize(" ".join(d["important_kwd"]))
            logging.debug("chunk: %s, extracted keywords: %s", d["content_with_weight"], d["important_kwd"])
        progress_callback(msg="Keywords generation completed in {:.2f}s".format(timer() - st))

    # Question generation per chunk.
    # FIXME: this hard-coded override forces question generation with topn=3,
    # ignoring whatever the user configured in parser_config.
    task["parser_config"]["auto_questions"] = 3
    if task["parser_config"].get("auto_questions", 0):
        st = timer()
        progress_callback(msg="Start to generate questions for every chunk ...")
        chat_mdl = LLMBundle(task["tenant_id"], LLMType.CHAT, llm_name=task["llm_id"], lang=task["language"])
        for d in docs:
            cached = get_llm_cache(chat_mdl.llm_name, d["content_with_weight"], "question",
                                   {"topn": task["parser_config"]["auto_questions"]})
            if not cached:
                cached = question_proposal(chat_mdl, d["content_with_weight"], task["parser_config"]["auto_questions"])
                if cached:
                    set_llm_cache(chat_mdl.llm_name, d["content_with_weight"], cached, "question",
                                  {"topn": task["parser_config"]["auto_questions"]})

            d["question_kwd"] = cached.split("\n")
            d["question_tks"] = rag_tokenizer.tokenize("\n".join(d["question_kwd"]))
        progress_callback(msg="Question generation completed in {:.2f}s".format(timer() - st))

    return docs


def create_image_dict(d, task, image):
    """Build the record inserted into the image table for one chunk image.

    The object's storage path is "<bucket_id>-<id>" (kb id + chunk id).

    Args:
        d: chunk record; supplies "id" and "docnm_kwd".
        task: task row dict; supplies "kb_id" (bucket) and "doc_id".
        image: PIL-like image exposing .mode/.size/.format; for raw bytes
            (or any object lacking those attributes) the metadata fields are
            None instead of raising AttributeError as before.

    Returns:
        dict: keyword arguments for ImageService.insert.
    """
    return {
        "id": d.get("id"),
        "bucket_id": task.get("kb_id"),
        "doc_id": task["doc_id"],
        "doc_name": d.get("docnm_kwd"),
        "image_mode": getattr(image, "mode", None),
        "image_size": getattr(image, "size", None),
        "image_format": getattr(image, "format", None),
    }


def embedding(docs, mdl, parser_config=None, callback=None):
    """Compute one vector per doc as a weighted blend of its title embedding
    and its content (or auto-generated questions) embedding, and attach it
    to the doc under "q_<dim>_vec".

    Args:
        docs: chunk records, mutated in place.
        mdl: model bundle with encode(texts) -> (vectors, token_count).
        parser_config: optional; "filename_embd_weight" sets the title weight
            (default 0.1).
        callback: unused, kept for interface compatibility.

    Returns:
        tuple[int, int]: (total tokens consumed, vector dimensionality).
    """
    if parser_config is None:
        parser_config = {}
    batch_size = 16

    titles, contents = [], []
    for doc in docs:
        titles.append(doc.get("docnm_kwd", "Title"))
        text = "\n".join(doc.get("question_kwd", []))
        if not text:
            text = doc["content_with_weight"]
        # Strip simple table markup so it doesn't pollute the embedding.
        text = re.sub(r"</?(table|td|caption|tr|th)( [^<>]{0,12})?>", " ", text)
        contents.append(text if text else "None")

    token_total = 0
    if len(titles) == len(contents):
        batches = []
        for start in range(0, len(titles), batch_size):
            vecs, used = mdl.encode(titles[start:start + batch_size])
            batches.append(vecs)
            token_total += used
        titles = np.concatenate(batches, axis=0) if batches else np.array([])

    batches = []
    for start in range(0, len(contents), batch_size):
        vecs, used = mdl.encode(contents[start:start + batch_size])
        batches.append(vecs)
        token_total += used
    contents = np.concatenate(batches, axis=0) if batches else np.array([])

    # Blend title and content vectors; fall back to content-only if the
    # counts ever diverge.
    title_w = float(parser_config.get("filename_embd_weight", 0.1))
    if len(titles) == len(contents):
        vects = title_w * titles + (1 - title_w) * contents
    else:
        vects = contents

    assert len(vects) == len(docs)
    vector_size = 0
    for i, doc in enumerate(docs):
        vec = vects[i].tolist()
        vector_size = len(vec)
        doc["q_%d_vec" % len(vec)] = vec
    return token_total, vector_size


def init_kb(row, vector_size: int):
    """Ensure the tenant's doc-store index exists for this kb with the given embedding vector size."""
    idxnm = search.index_name(row["tenant_id"])
    return settings.docStoreConn.createIdx(idxnm, row.get("kb_id", ""), vector_size)


def init_qa_kb(task, vector_size: int):
    """Ensure the tenant's dedicated QA index exists and return its name.

    The QA index is kept separate from the main chunk index and is named
    "ragflow_qa_<tenant_id>".
    """
    tenant_id = task["tenant_id"]
    idxnm = f"ragflow_qa_{tenant_id}"
    settings.docStoreConn.createIdx(idxnm, task.get("kb_id", ""), vector_size)
    return idxnm


def set_progress(task_id, from_page=0, to_page=-1, prog=None, msg="Processing..."):
    """Persist a progress update for a task.

    A negative `prog` marks an error; if the task was canceled the message
    gets a "[Canceled]" suffix and progress is forced to -1. The message is
    prefixed with the page range (when known) and a timestamp. Returns
    silently if the task row no longer exists.

    Args:
        task_id: id of the task row to update.
        from_page: zero-based first page of this task's range.
        to_page: zero-based last page; <= 0 means "unknown/whole document".
        prog: progress fraction in [0, 1], or negative for error; None to
            update only the message.
        msg: human-readable progress message.
    """
    if prog is not None and prog < 0:
        msg = "[ERROR]" + msg
    try:
        cancel = TaskService.do_cancel(task_id)
    except DoesNotExist:
        logging.warning(f"set_progress task {task_id} is unknown")
        return

    if cancel:
        msg += " [Canceled]"
        prog = -1

    if to_page > 0:
        if msg:
            msg = f"Page({from_page + 1}~{to_page + 1}): " + msg
    if msg:
        msg = datetime.now().strftime("%H:%M:%S") + " " + msg
    d = {"progress_msg": msg}
    if prog is not None:
        d["progress"] = prog

    logging.info(f"set_progress({task_id}), progress: {prog}, progress_msg: {msg}")
    try:
        TaskService.update_progress(task_id, d)
    except DoesNotExist:
        logging.warning(f"set_progress task {task_id} is unknown")
        return

    # Return the pooled DB connection; this runs in a long-lived worker.
    close_connection()
