import os
import logging
import re

import tabula
import pandas as pd

from price_analysis.core.models import IncomingSO
from price_analysis.core.config import settings
from price_analysis.utils.file_support import (
    get_file_path,
    get_pdf_number_of_pages,
    write_ocr_file,
)

logger = logging.getLogger("main")


def recognize_image(file_path: str):
    """Extract tables from an image file and return them.

    Args:
        file_path: path to the image file to recognize.

    Returns:
        Whatever ``Image.extract_tables()`` yields (a list of detected
        tables).

    NOTE(review): ``Image`` is not imported anywhere in this module, so
    calling this function currently raises ``NameError``. It looks like
    ``from img2table.document import Image`` is missing — confirm which
    library is intended and add the import at module level.
    """
    img = Image(
        src=file_path,
        detect_rotation=False,
    )

    extracted_tables = img.extract_tables()
    # Route diagnostics through the module logger instead of a bare print.
    logger.debug(extracted_tables)

    return extracted_tables


async def recognize_pdf(file_path: str) -> list[dict]:
    """Recognize tabular data in a PDF and return it grouped by item.

    Reads the tables with tabula (lattice mode, relative crop area),
    normalizes them into a single DataFrame, drops rows without a
    quantity, sums quantities over identical items, and returns the
    result as a list of record dicts with whitespace/line breaks
    stripped from string values.

    Args:
        file_path: path to the PDF file to recognize.

    Returns:
        A list of dicts with keys "Description", "Model", "Index",
        "Provider", "Units" and the summed "Quantity".
    """
    logger.info("Начато распознавание PDF файла")
    # Crop area in percent of the page (relative_area=True); the first
    # page reserves extra space at the bottom for a title block.
    top, left, bottom, right = 2, 6, 80, 96
    number_of_pages: int = await get_pdf_number_of_pages(file_path)

    first_page: list = tabula.read_pdf(
        file_path,
        pages="1",
        multiple_tables=True,
        area=[top, left, bottom, right],
        relative_area=True,
        lattice=True,
    )

    # Subsequent pages have no title block, so the table may extend
    # further down the page.
    bottom = 93
    other_page: list = []
    # Guard: for a single-page PDF the original range "2-1" is invalid
    # and tabula would fail (and the empty result crashed downstream).
    if number_of_pages > 1:
        other_page = tabula.read_pdf(
            file_path,
            pages=f"2-{number_of_pages}",
            multiple_tables=True,
            area=[top, left, bottom, right],
            relative_area=True,
            lattice=True,
        )

    header = [
        "Position",
        "Description",
        "Model",
        "Index",
        "Provider",
        "Units",
        "Quantity",
        "MassOfUnits",
        "Note",
    ]

    pd_first_page = create_dataframe(pd.DataFrame(), first_page, header)
    if other_page:
        pd_other_page = create_dataframe(pd.DataFrame(), other_page, header)
    else:
        # No further pages: use an empty frame with the same columns so
        # the concat below stays well-formed.
        pd_other_page = pd.DataFrame(columns=header)

    pd_single = pd.concat([pd_first_page, pd_other_page])

    # Rows without a quantity are artifacts (section headers, notes).
    pd_single = pd_single.dropna(subset=["Quantity"])
    pd_single = pd_single.reset_index(drop=True)
    value = {
        "Position": 0,
        "Description": "",
        "Model": "",
        "Index": "",
        "Provider": "",
        "Units": "",
        "Quantity": 0,
        "MassOfUnits": "",
        "Note": "",
    }
    pd_single = pd_single.fillna(value=value)
    # Identical items that appear on several pages are merged and their
    # quantities summed.
    pd_grouped = (
        pd_single.groupby(["Description", "Model", "Index", "Provider", "Units"])[
            "Quantity"
        ]
        .sum()
        .reset_index()
    )

    logger.info("Выдается словарь с распознанными данными")

    data = pd_grouped.to_dict(orient="records")

    return remove_line_breaker(data)


def create_dataframe(
    pd_frame: pd.DataFrame, df: list, header: list[str]
) -> pd.DataFrame:
    """Concatenate per-table DataFrames into one frame and name its columns.

    Args:
        pd_frame: base frame the tables are appended to (usually empty).
        df: list of DataFrames as returned by ``tabula.read_pdf``; each
            is expected to have ``len(header)`` columns.
        header: column names to assign to the combined frame.

    Returns:
        One DataFrame whose columns are ``header``. When no tables are
        supplied, an empty frame with those columns is returned instead
        of raising (the original assigned 9 names to a 0-column frame,
        which raised ``ValueError`` for e.g. single-page PDFs).
    """
    if not df and pd_frame.empty:
        return pd.DataFrame(columns=header)
    # One concat call instead of repeated concatenation in a loop, which
    # copies the accumulated frame each iteration (quadratic).
    pd_frame = pd.concat([pd_frame, *df])
    pd_frame.columns = header
    return pd_frame


async def recognize_main(files_list: list[IncomingSO]) -> None:
    """Run recognition for every incoming file that has no OCR result yet.

    For each file: skip it if a JSON result already exists, recognize
    PDFs via :func:`recognize_pdf` and persist the result; other
    supported formats are currently a stub.

    Args:
        files_list: incoming files to process.
    """
    supported_formats: list[str] = settings.get_support_formats_list()
    # PDF has a dedicated branch below, so drop it from the generic list.
    # Guarded: ``list.remove`` would raise ValueError if the configured
    # list ever omits "pdf".
    if "pdf" in supported_formats:
        supported_formats.remove("pdf")

    for file in files_list:
        # Lowercase once instead of at every comparison.
        file_extension: str = file.file_name.split(".")[-1].lower()
        file_path: str = get_file_path(file_id=file.id, file_name=file.file_name)
        file_name: str = settings.incoming_filename.format(
            id=file.id, file_name=file.file_name
        )
        full_ocr_file_path = os.path.join(settings.ocr_data_path, file_name + ".json")

        section = {"title": file.section_type, "cipher": file.section_name}

        if os.path.isfile(full_ocr_file_path):
            logger.info("Файл с распознанными данными уже существует")
            continue

        if file_extension == "pdf":
            data: list[dict] = await recognize_pdf(file_path)
            content: dict = {"section": section, "data": data}
            await write_ocr_file(full_ocr_file_path, content)
            logger.info(f"Файл {file_name} распознан и записан")
        elif file_extension in supported_formats:
            # TODO: recognition of non-PDF formats is not implemented —
            # this assignment is a placeholder with no effect.
            data: pd.DataFrame = pd.DataFrame()


def remove_line_breaker(data: list[dict]) -> list[dict]:
    """Strip leading/trailing whitespace and line breaks from string values.

    Mutates each record in place and returns the same list: every string
    value loses its leading/trailing whitespace and any embedded
    newline/carriage-return characters; non-string values are untouched.
    """
    # Compile once instead of re-parsing the pattern for every value.
    cleaner = re.compile(r"^\s+|\n|\r|\s+$")
    for record in data:
        for field, content in record.items():
            if isinstance(content, str):
                record[field] = cleaner.sub("", content)
    return data
