import scrapy
from scrapy import Request

from apps.tax_honor.tax_honor.base_tax_honor_mixin import BaseTaxHonorMixin
from apps.tax_honor.tax_honor.items import NetTaxHonorResponseItem, NetTaxHonorItem
from apps.tax_honor.tax_honor.utils.tools import *


class BaseCleanMixin(BaseTaxHonorMixin):
    """Parse government honor/subsidy announcement pages and their attachments
    (html / xls / pdf / doc / zip) into per-company ``NetTaxHonorItem`` rows,
    followed by one ``NetTaxHonorResponseItem`` carrying the extracted count.
    """

    # Property descriptor borrowed from Spider; resolves per-instance via the MRO.
    logger = scrapy.Spider.logger

    # Header aliases that are all renamed to the canonical "company_name" column.
    company_name_mapping = {
        "企业名称": "company_name",
        "企业(集团)名称": "company_name",
        "详细名称": "company_name",
        "申报单位": "company_name",
        "报送单位": "company_name",
        "单位详细名称": "company_name",
        "名称": "company_name",
        "工厂名称": "company_name",
        "单位名称": "company_name",
        "运营主体名称": "company_name",
        "所在单位/个人": "company_name",
        "企业工业设计中心": "company_name",
        "工业设计企业": "company_name",
        "申报主体单位名称": "company_name",
        "依托企业": "company_name",
        "申报主体（企业名称）": "company_name",
        "主要完成单位": "company_name",
        "申报单位全称": "company_name",
        "申报企业": "company_name",
    }
    # Other recognised header aliases.
    other_mapping = {
        "认定类型": "project_name",
        "项目名称": "project_name",
        "补助金额": "amount",
    }

    def parse_spider_detail(self, response, **kwargs):
        """Dispatch to the format-specific parser based on the URL suffix."""
        url = response.request.url
        self.logger.info(f"开始解析 {url} {kwargs['response_item']}")
        if url.endswith((".xls", ".xlsx", ".et")):
            yield from self.parse_xls(response, **kwargs)
        elif url.endswith(".pdf"):
            yield from self.parse_pdf(response, **kwargs)
        elif url.endswith((".docx", ".doc")):
            yield from self.parse_doc(response, **kwargs)
        elif url.endswith(".zip"):
            yield from self.parse_zip(response, **kwargs)
        else:
            yield from self.parse_html(response, **kwargs)

    def _emit_items(self, data_dfs, response_item):
        """Yield one ``NetTaxHonorItem`` per company name found in *data_dfs*.

        Shared by every format-specific parser (replaces four copy-pasted
        loops). If at least one item was produced, a final
        ``NetTaxHonorResponseItem`` with the total ``item_count`` is yielded.

        :param data_dfs: iterable of ``pd.DataFrame`` tables to scan.
        :param response_item: dict-like metadata of the source document.
        """
        item_count = 0
        for data_df in data_dfs:
            self.logger.debug(data_df.head())
            data_df.rename(columns={**self.company_name_mapping, **self.other_mapping}, inplace=True)
            for _, row in data_df.iterrows():
                if "company_name" not in row.index:
                    continue
                cell = row["company_name"]
                # Duplicate "company_name" columns make this cell a Series.
                row_company_names = [cell] if isinstance(cell, str) else cell.tolist()
                for company_name in row_company_names:
                    item_count += 1
                    company_name = str(company_name).replace("\r", "")
                    item = {
                        "province": response_item["province"],
                        "hierarchy": response_item["hierarchy"],
                        "city": response_item["city"],
                        "county": response_item["county"],
                        "document_title": response_item["document_title"],
                        "publish_date": response_item["publish_date"],
                        "decare_department": response_item["decare_department"],
                        "document_url": response_item["document_url"],
                        "attachment_url": response_item["attachment_url"],
                        "deadline": response_item["deadline"],
                        "project_type": None,
                        "project_name": row.to_dict().get("project_name") or response_item["document_title"],
                        "company_name": company_name,
                        "amount": None,
                        "source": response_item["source"],
                    }
                    yield NetTaxHonorItem(**item)

        if item_count:
            response_item["item_count"] = item_count
            yield NetTaxHonorResponseItem(**response_item)

    def parse_html(self, response, **kwargs):
        """Extract tables embedded directly in an HTML page."""
        response_item = kwargs["response_item"]
        html = response.body.decode("utf-8")
        dfs = pd.read_html(html, encoding="utf-8")
        self.logger.info(f"html {response.request.url} 共获取到 {len(dfs)}个表格 ")
        yield from self._emit_items(self._extract_df_skip_rows(dfs), response_item)

    def parse_zip(self, response, **kwargs):
        """Download a zip attachment.

        TODO: the archive is saved but not yet unpacked/parsed — no items are
        produced.
        """
        response_item = kwargs["response_item"]
        data_dfs = []
        file_path = save_file(response.body, response.request.url)
        self.logger.info(f"zip {Path(file_path).as_posix()} 共获取到 {len(data_dfs)}个表格 ")
        # Must be a generator: ``yield from self.parse_zip(...)`` in
        # parse_spider_detail raised TypeError when this returned plain None.
        yield from ()

    def parse_xls(self, response, **kwargs):
        """Download an Excel attachment and extract its (single) sheet."""
        response_item = kwargs["response_item"]
        file_path = save_file(response.body, response.request.url)
        data_dfs = [pd.read_excel(file_path, dtype=str).replace({np.nan: ""})]
        # was mislabelled "pdf" in the log line
        self.logger.info(f"xls {Path(file_path).as_posix()} 共获取到 {len(data_dfs)}个表格 ")
        yield from self._emit_items(self._extract_df_skip_rows(data_dfs), response_item)

    def _extract_df_skip_rows(self, data_dfs, max_skip_rows=5):
        """Normalise tables whose real header row is not the first row.

        For each frame: if the current columns already contain a known
        company-name alias, keep it as-is. Otherwise scan the first
        ``max_skip_rows`` data rows for a header-looking row, promote it to
        the columns and drop everything above it. If no header is found but
        the frame is as wide as the previously accepted one, treat it as a
        continuation of that table: reuse its columns and prepend the
        orphaned current "columns" row as data.
        """
        new_data_dfs = []
        for data_df in data_dfs:
            self.logger.debug(data_df.head())
            columns = ["".join(str(x).split()) for x in data_df.columns]
            if any(alias in col for col in columns for alias in self.company_name_mapping):
                new_data_dfs.append(data_df)
                continue
            # was ``range(max_skip_rows if len(data_df) > max_skip_rows else 0)``,
            # which skipped header detection entirely for tables of <= max_skip_rows rows
            for row_idx in range(min(max_skip_rows, len(data_df))):
                candidate = ["".join(str(x).split()) for x in data_df.iloc[row_idx, :].tolist()]
                if any(alias in str(cell) for cell in candidate for alias in self.company_name_mapping):
                    data_df.columns = candidate
                    new_data_dfs.append(data_df.iloc[row_idx + 1:])
                    break
            else:
                if new_data_dfs and len(data_df.columns) == len(new_data_dfs[-1].columns):
                    data_header = pd.DataFrame(data=[columns], columns=new_data_dfs[-1].columns)
                    data_df.columns = new_data_dfs[-1].columns
                    new_data_dfs.append(pd.concat([data_header, data_df], axis=0))
        return new_data_dfs

    def parse_pdf(self, response, **kwargs):
        """Parse a PDF attachment: follow embedded links, then extract tables."""
        response_item = kwargs["response_item"]
        file_path = save_file(response.body, response.request.url)

        # Recurse into links embedded in the PDF (skip intranet 172.* hosts).
        for _, link in extract_links_from_pdf(file_path):
            self.logger.info(f"pdf {Path(file_path).as_posix()} 共获取到附件 {link}")
            if "http://172." not in link:
                yield Request(url=link, callback=self.parse_spider_detail, cb_kwargs={"response_item": response_item})

        data_dfs = extract_tables_from_pdf(file_path)
        self.logger.info(f"pdf {Path(file_path).as_posix()} 共获取到 {len(data_dfs)}个表格 ")
        yield from self._emit_items(self._extract_df_skip_rows(data_dfs), response_item)

    def parse_doc(self, response, **kwargs):
        """Parse a Word attachment: follow embedded links, then extract tables."""
        response_item = kwargs["response_item"]
        file_path = save_file(response.body, response.request.url)

        # Recurse into links embedded in the document (skip intranet 172.* hosts).
        for _, link in extract_links_from_docx(file_path):
            self.logger.info(f"doc {Path(file_path).as_posix()} 共获取到附件 {link}")
            if "http://172." not in link:
                yield Request(url=link, callback=self.parse_spider_detail, cb_kwargs={"response_item": response_item})

        data_dfs = read_tables_from_docx(file_path)
        # was mislabelled "pdf" in the log line
        self.logger.info(f"doc {Path(file_path).as_posix()} 共获取到 {len(data_dfs)}个表格 ")
        # NOTE(review): historically docx tables were NOT run through
        # _extract_df_skip_rows — presumably their first row is already the
        # header; kept as-is to preserve behavior. Confirm before unifying.
        yield from self._emit_items(data_dfs, response_item)