#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/1/30 16:29
# @Author  : 王凯
# @File    : base_tax_policy_spider.py
# @Project : scrapy_spider
import math
import re
import urllib
import urllib.parse
from typing import Any

import scrapy
from bs4 import BeautifulSoup
from gerapy_auto_extractor import extract_list, is_list
from gne import GeneralNewsExtractor
from scrapy.http import Response
from scrapy.settings import BaseSettings

from utils.tools import replace_str

# URL suffixes treated as article/content pages worth following.
# Matched by substring, so ".shtm" also covers ".shtml".
CONTENT_URL_ALLOW = [".html", ".htm", ".shtm"]


class BaseTaxPolicySpider(scrapy.Spider):
    """Base spider for crawling government tax-policy listing sites.

    Bundles several reusable pagination strategies found on these sites:

    - static ``index.html`` / ``index_N.html`` listings (``gen_request_by_index_plus``)
    - ``createPageHTML(...)`` JS paginators (``gen_request_by_index_create_page_html``)
    - ajax/JSON list APIs (``gen_request_by_api_ajax_list``)
    - ``search.jsp`` form endpoints (``gen_request_by_search_jsp``)
    - ``dataproxy.jsp`` form endpoints (``gen_request_by_dataproxy``)
    - ``getDocuments`` JSON endpoints (``gen_request_by_get_documents``)

    plus helpers for extracting article metadata (GNE) and cleaning article
    HTML.  Subclasses typically set ``url`` / ``other_custom_settings`` and
    supply an item-parsing ``callback``.
    """

    custom_settings = {"DOWNLOAD_TIMEOUT": 10, "LOG_LEVEL": "INFO"}
    # Per-subclass extra settings, merged on top of custom_settings at startup.
    other_custom_settings = {}
    # Listing/start URL of the target site; also used as the Referer header.
    url: str = ''
    page_size: int = 500
    page_range: int = 1
    # Aliases so subclasses can write self.Request / self.FormRequest.
    Request = scrapy.Request
    FormRequest = scrapy.FormRequest

    @classmethod
    def update_settings(cls, settings: BaseSettings) -> None:
        """Apply ``custom_settings`` merged with ``other_custom_settings``.

        Fix: merge into a fresh dict instead of mutating ``cls.custom_settings``
        in place — the original in-place ``update`` wrote one subclass's
        ``other_custom_settings`` into the base-class dict shared by every
        spider, leaking settings between spiders in the same process.
        """
        merged = {**(cls.custom_settings or {}), **(cls.other_custom_settings or {})}
        settings.setdict(merged, priority="spider")

    # ################################################################################################################
    @staticmethod
    def parse_title_and_publish_time_by_gen(html, with_body_html=False, **kwargs):
        """Extract article metadata from *html* via GNE.

        Returns a dict shaped like::

            {
              'title': title,
              'author': author,
              'publish_time': publish_time,
              'content': ...,
              'images': [...],
            }
        """
        extractor = GeneralNewsExtractor()
        return extractor.extract(html, with_body_html=with_body_html, **kwargs)

    @staticmethod
    def parse_list_from_html_by_gen(html, **kwargs):
        """Return the auto-extracted link list for *html*.

        Returns ``[]`` when gerapy does not classify the page as a list page.
        """
        if is_list(html, **kwargs):
            link_list = extract_list(html, **kwargs)
            return link_list
        return []

    def parse(self, response: Response, **kwargs: Any) -> Any:
        """Default no-op; subclasses override with their own parsing."""
        pass

    def gen_request_by_index_plus(
        self, url, total_page_xpath, list_a_xpath: "str | list", callback, auto_next=True, total_page_xpath_reg=None
    ):
        """Crawl a static listing that paginates as ``index_N.html``.

        Fix: the annotation ``str or list`` evaluated to plain ``str`` at
        definition time; the string form ``"str | list"`` stays lazy and
        documents both accepted types.

        :param url: first listing page; must contain ``index.`` in its path
        :param total_page_xpath: xpath of the node holding the page count
        :param list_a_xpath: xpath (or list of xpaths) selecting article ``<a>`` elements
        :param callback: callback invoked for each article page response
        :param auto_next: when True, follow all remaining listing pages
        :param total_page_xpath_reg: regex extracting the count (default: 共 N 页)
        """
        yield self.Request(
            url,
            callback=self.gen_request_by_index_plus_mid,
            cb_kwargs={
                "total_page_xpath": total_page_xpath,
                "list_a_xpath": list_a_xpath,
                "callback": callback,
                "auto_next": auto_next,
                "total_page_xpath_reg": total_page_xpath_reg,
                "url": url,
            },
        )

    def gen_request_by_index_plus_mid(self, response, **kwargs):
        """First-page handler for :meth:`gen_request_by_index_plus`.

        Yields the first page's article requests, reads the page count, then
        schedules ``index_1`` .. ``index_{total-1}`` (page 0 is ``index.``).
        """
        total_page_xpath = kwargs.get("total_page_xpath")
        list_a_xpath = kwargs.get("list_a_xpath")
        callback = kwargs.get("callback")
        auto_next = kwargs.get("auto_next")
        total_page_xpath_reg = kwargs.get("total_page_xpath_reg")
        url = kwargs.get("url")
        yield from self.parse_response_by_index_plus(response, callback=callback, list_a_xpath=list_a_xpath)
        if not total_page_xpath_reg:
            total_page_xpath_reg = r"共\s*(\d+)\s*页"
        total = response.xpath(total_page_xpath).re_first(total_page_xpath_reg)
        if total and auto_next:
            # Some sites report a record count instead of a page count;
            # 10 records per page is assumed for those — TODO confirm per site.
            if "pgCountMode: 'recordCount" in response.text:
                total = int(total) // 10 + 1
            for i in range(1, int(total)):
                self.logger.info(f"开始爬取 第 {i} 页 params: {i}")
                yield response.follow(
                    url.replace("index.", f"index_{i}."),
                    callback=self.parse_response_by_index_plus,
                    cb_kwargs={"callback": callback, "list_a_xpath": list_a_xpath},
                )

    def parse_response_by_index_plus(self, response, **kwargs):
        """Yield a request per article link matched by ``list_a_xpath``.

        Direct document downloads (.doc/.pdf/.xls) are skipped; only hrefs
        containing a CONTENT_URL_ALLOW suffix are followed.
        """
        list_a_xpath = kwargs.get("list_a_xpath")
        callback = kwargs.get("callback")
        if isinstance(list_a_xpath, str):
            list_a_xpath = [list_a_xpath]
        for xpath in list_a_xpath:
            for a_sel in response.xpath(xpath):
                href = a_sel.xpath("./@href").get()
                if not href:
                    continue
                if any(ext in href for ext in [".doc", ".pdf", ".xls"]):
                    continue  # binary document, not an article page
                if any(ext in href for ext in CONTENT_URL_ALLOW):
                    yield response.follow(url=href, callback=callback)

    def process_content(self, content):
        """Clean article HTML: drop comments, img/script/style tags and
        presentation attributes (style/class/id); return the cleaned markup.
        """
        content = replace_str(content, "<!--(.|\n)*?-->")
        soup = BeautifulSoup(content, "html.parser")
        for tag in soup.find_all():
            if tag.name in ["img", "script", "style"]:
                tag.decompose()
                continue
            if not tag.attrs:
                continue
            for attr in ["style", "class", "id"]:
                if attr in tag.attrs:
                    del tag[attr]
            if tag.name == "a" and "href" in tag.attrs:
                href = tag["href"]
                # new_href = self.replace_oss_url(href)
                new_href = href  # TODO: rewrite to OSS-hosted URL
                tag["href"] = new_href
        content = soup.decode()
        return content

    def gen_request_by_index_create_page_html(
        self,
        url,
        next_page_href_xpath,
        list_a_xpath,
        callback,
        auto_next=True,
        end_tail="index",
        total_page_xpath_reg=None,
    ):
        """Crawl a listing paginated by a JS ``createPageHTML(total, ...)`` call.

        :param url: first listing page; must contain ``{end_tail}.`` in its path
        :param next_page_href_xpath: kept for interface compatibility (unused downstream)
        :param list_a_xpath: xpath(s) selecting article ``<a>`` elements
        :param callback: callback for each article page
        :param auto_next: follow the remaining pages automatically
        :param end_tail: filename stem replaced with ``{end_tail}_N`` per page
        :param total_page_xpath_reg: regex extracting the page count
        """
        yield self.Request(
            url,
            callback=self.gen_request_by_index_create_page_html_mid,
            cb_kwargs={
                "url": url,
                "next_page_href_xpath": next_page_href_xpath,
                "list_a_xpath": list_a_xpath,
                "callback": callback,
                "auto_next": auto_next,
                "end_tail": end_tail,
                "total_page_xpath_reg": total_page_xpath_reg,
            },
        )

    def gen_request_by_index_create_page_html_mid(self, response, **kwargs):
        """First-page handler for :meth:`gen_request_by_index_create_page_html`:
        parse page 1, pull the total from createPageHTML, schedule the rest."""
        url = kwargs.get("url")
        next_page_href_xpath = kwargs.get("next_page_href_xpath")
        list_a_xpath = kwargs.get("list_a_xpath")
        callback = kwargs.get("callback")
        auto_next = kwargs.get("auto_next")
        end_tail = kwargs.get("end_tail")
        total_page_xpath_reg = kwargs.get("total_page_xpath_reg")
        yield from self.parse_response_by_index_plus(response, callback=callback, list_a_xpath=list_a_xpath)
        if not total_page_xpath_reg:
            total_page_xpath_reg = r"createPageHTML\([\"\']?(\d+)[\"\']?,"
        total = response.xpath(".").re_first(total_page_xpath_reg)
        if total and auto_next:
            for i in range(1, int(total) + 1):
                url_temp = url.replace(f"{end_tail}.", f"{end_tail}_{i}.")
                self.logger.info(f"开始爬取 第 {i} 页 url: {url_temp}")
                yield response.follow(
                    url_temp,
                    callback=self.parse_response_by_index_plus,
                    cb_kwargs={
                        "callback": callback,
                        "list_a_xpath": list_a_xpath,
                    },
                )

    def gen_request_by_api_ajax_list(
        self,
        url,
        data=None,
        list_a_xpath=None,
        parse_response=None,
        end_tail=None,
        callback=None,
        auto_next=True,
    ):
        """Crawl a JSON list API that returns ``{"total": N, "data": [...]}``.

        :param url: API endpoint, POSTed with raw body *data*
        :param data: raw request body (string), re-sent for every page
        :param list_a_xpath: passed through to *parse_response*
        :param parse_response: page handler (default: parse_response_by_api_ajax_list)
        :param end_tail: page-number-bearing URL suffix; digits are swapped per page
        :param callback: callback for each article page
        :param auto_next: follow the remaining pages automatically
        """
        yield self.Request(
            url,
            body=data,
            method="POST",
            callback=self.gen_request_by_api_ajax_list_mid,
            cb_kwargs={
                "list_a_xpath": list_a_xpath,
                "data": data,
                "parse_response": parse_response,
                "end_tail": end_tail,
                "callback": callback,
                "auto_next": auto_next,
                "url": url,
            },
        )

    def gen_request_by_api_ajax_list_mid(self, response, **kwargs):
        """First-page handler for :meth:`gen_request_by_api_ajax_list`:
        derive the page count from ``total`` and schedule the later pages."""
        list_a_xpath = kwargs.get("list_a_xpath")
        data = kwargs.get("data")
        parse_response = kwargs.get("parse_response")
        end_tail = kwargs.get("end_tail")
        callback = kwargs.get("callback")
        auto_next = kwargs.get("auto_next")
        url = kwargs.get("url")
        if not parse_response:
            parse_response = self.parse_response_by_api_ajax_list

        # Page size of 20 is assumed by the APIs this helper targets — TODO confirm.
        total_page = math.ceil(response.json()["total"] / 20)
        self.logger.info(f"开始爬取 第 1 页/ 共 {total_page} 页 data: {data}")
        yield from parse_response(response, callback=callback, list_a_xpath=list_a_xpath)
        if total_page and auto_next:
            total_page = int(total_page)
            for i in range(1, total_page + 1):
                # Replace the digit run in end_tail with the page number, then
                # splice it back onto the base URL.
                end_tail_temp = re.sub(r"\d+", str(i), end_tail)
                url_temp = url.replace(end_tail, "") + end_tail_temp
                self.logger.info(f"开始爬取 第 {i} 页, url: {url_temp}")
                yield response.follow(
                    url_temp,
                    body=data,
                    method="POST",
                    callback=parse_response,
                    cb_kwargs={"callback": callback, "list_a_xpath": list_a_xpath},
                )

    def parse_response_by_api_ajax_list(self, response, **kwargs):
        """Yield a request per entry in the JSON list payload's ``data`` array."""
        list_a_xpath = kwargs.get("list_a_xpath")
        callback = kwargs.get("callback")
        list_select = response.json()["data"]
        for item in list_select:
            yield response.follow(item["url"], callback=callback)

    def gen_request_by_search_jsp(
        self, url=None, params=None, data=None, callback=None, auto_next=True, parse_response=None
    ):
        """Crawl a ``search.jsp`` style endpoint (query params + POST form).

        Defaults target jxj.hangzhou.gov.cn; callers override *url*, *params*
        and *data* for other sites.
        """
        if url is None:
            url = "http://jxj.hangzhou.gov.cn/module/xxgk/search.jsp"
        if params is None:
            params = {
                "standardXxgk": "0",
                "isAllList": "1",
                "texttype": "0",
                "fbtime": "-1",
                "vc_all": "",
                "vc_filenumber": "",
                "vc_title": "",
                "vc_number": "",
                "currpage": "1",
                "sortfield": "b_settop:0,createdatetime:0,orderid:0",
            }
        if data is None:
            data = {
                "infotypeId": "",
                "jdid": "3244",
                "area": "",
                "divid": "div1692658",
                "vc_title": "",
                "vc_number": "",
                "sortfield": "b_settop:0,createdatetime:0,orderid:0",
                "currpage": "1",
                "vc_filenumber": "",
                "vc_all": "",
                "texttype": "0",
                "fbtime": "-1",
                "standardXxgk": "0",
                "isAllList": "1",
            }
        if '?' not in url:
            url = url + '?'
        temp_url = url + urllib.parse.urlencode(params)
        yield self.FormRequest(
            temp_url,
            formdata=data,
            method="POST",
            callback=self.gen_request_by_search_jsp_mid,
            cb_kwargs={
                "params": params,
                "data": data,
                "callback": callback,
                "auto_next": auto_next,
                "url": url,
                "parse_response": parse_response,
            },
        )

    def gen_request_by_search_jsp_mid(self, response, **kwargs):
        """First-page handler for :meth:`gen_request_by_search_jsp`.

        Fix: the paginated requests previously sent ``formdata=data`` — the
        computed ``tmp_data`` with the updated ``currpage`` was built and then
        ignored, so every POST re-requested page 1.  Also use ``str(i)``:
        ``FormRequest.formdata`` requires string values.
        """
        data = kwargs.get("data")
        params = kwargs.get("params")
        callback = kwargs.get("callback")
        auto_next = kwargs.get("auto_next")
        url = kwargs.get("url")
        parse_response = kwargs.get("parse_response")
        meta = response.meta
        total_page = response.xpath("string()").re_first(r"共\s*(\d+)\s*页")
        if not parse_response:
            parse_response = self.parse_response_by_search_jsp
        self.logger.info(f"开始爬取 第 0 页/ 共 {total_page} 页 params: {params} {data}")
        yield from parse_response(response, callback=callback)
        if total_page and auto_next:
            total_page = int(total_page)
            for i in range(1, total_page + 1):
                tmp_params = {**params, "currpage": str(i)}
                tmp_data = {**data, "currpage": str(i)}
                temp_url = url + urllib.parse.urlencode(tmp_params)
                self.logger.info(f"开始爬取 第 {i} 页 params: {tmp_params} {tmp_data}")
                yield self.FormRequest(
                    temp_url,
                    formdata=tmp_data,
                    method="POST",
                    callback=parse_response,
                    cb_kwargs={
                        "callback": callback,
                    },
                    meta=meta
                )

    def parse_response_by_search_jsp(self, response, **kwargs):
        """Yield a request per ``<a>`` inside the result ``<ul>`` lists."""
        callback = kwargs.get("callback")
        list_select = response.xpath("//ul")
        for a_sel in list_select.xpath(".//a"):
            href = a_sel.xpath("./@href").get()
            yield self.Request(href, callback=callback)

    def gen_request_by_dataproxy(
        self, url=None, params=None, data=None, callback=None, auto_next=True, parse_response=None
    ):
        """Crawl a ``dataproxy.jsp`` style endpoint (record-window pagination).

        Fix: honor a caller-supplied *params* — the original overwrote the
        argument unconditionally, silently ignoring any custom record window.
        """
        if url is None:
            url = "http://kj.hangzhou.gov.cn/module/jpage/dataproxy.jsp"
        if params is None:
            params = {"startrecord": 1, "endrecord": self.page_size * 3, "perpage": int(self.page_size / self.page_range)}
        if data is None:
            data = {
                "col": "1",
                "appid": "1",
                "webid": "3255",
                "path": "/",
                "columnid": "1229397649",
                "sourceContentType": "1",
                "unitid": "7875744",
                "webname": "杭州科技政务网",
                "permissiontype": "0",
            }
        if "webname" in data:
            # Callers sometimes pass a percent-encoded webname; normalize it.
            data["webname"] = urllib.parse.unquote(data["webname"])
        headers = {"Referer": self.url} if self.url else {}
        if url[-1] != "?":
            url += "?"
        temp_url = url + urllib.parse.urlencode(params)
        yield self.FormRequest(
            temp_url,
            formdata=data,
            method="POST",
            headers=headers,
            callback=self.gen_request_by_dataproxy_mid,
            cb_kwargs={
                "params": params,
                "data": data,
                "callback": callback,
                "auto_next": auto_next,
                "url": url,
                "headers": headers,
                "parse_response": parse_response,
            },
        )

    def gen_request_by_dataproxy_mid(self, response, **kwargs):
        """First-window handler for :meth:`gen_request_by_dataproxy`:
        read ``<totalpage>`` and schedule the remaining record windows."""
        data = kwargs.get("data")
        params = kwargs.get("params")
        callback = kwargs.get("callback")
        auto_next = kwargs.get("auto_next")
        url = kwargs.get("url")
        headers = kwargs.get("headers")
        parse_response = kwargs.get("parse_response")

        total_page = response.xpath("string(//totalpage)").get()
        if not parse_response:
            parse_response = self.parse_response_by_dataproxy
        self.logger.info(f"开始爬取 第 0 页/ 共 {total_page} 页 params: {params} {data}")
        yield from parse_response(response, callback=callback)
        if total_page and auto_next:
            total_page = int(total_page)
            # NOTE(review): windows are rebuilt from class attrs (page_size /
            # page_range), not from the params the caller passed — confirm.
            # The +2 upper bound requests one extra window as a safety margin.
            for i in range(1, total_page + 2):
                window = {
                    "startrecord": ((i - 1) * self.page_size * 3) + 1,
                    "endrecord": i * self.page_size * 3,
                    "perpage": int(self.page_size / self.page_range),
                }
                temp_url = url + urllib.parse.urlencode(window)
                self.logger.info(f"开始爬取 第 {i} 页 params: {window}")
                yield self.FormRequest(
                    temp_url,
                    formdata=data,
                    method="POST",
                    headers=headers,
                    callback=parse_response,
                    cb_kwargs={
                        "callback": callback,
                    },
                )

    def parse_response_by_dataproxy(self, response, **kwargs):
        """Yield a request per ``<record>`` entry (skipping PDFs).

        :raises Exception: when the response contains no records at all.
        """
        callback = kwargs.get("callback")
        url_list = response.xpath("//record")
        self.logger.info(f"获取列表数量 {len(url_list)}")
        if not url_list:
            raise Exception("没有获取到url", response.text)
        for idx, record in enumerate(url_list):
            href = record.xpath(".//a/@href").get()
            self.logger.info(f"获取列表序列 {idx}/{len(url_list)} {href}")
            if href and not href.endswith(".pdf"):
                yield self.Request(href, callback=callback)

    def gen_request_by_get_documents(
            self, url=None, params=None, data=None, callback=None, auto_next=True, parse_response=None
    ):
        """Crawl a ``getDocuments`` JSON endpoint (pageIndex-based pagination).

        Defaults target www.yuncheng.gov.cn; callers override *url* and *data*.
        *params* is kept for interface compatibility (only forwarded).
        """
        if url is None:
            url = "https://www.yuncheng.gov.cn/intertidwebapp/govChanInfo/getDocuments"
        if data is None:
            data = {
                "pageIndex": "1",
                "pageSize": "20",
                "siteId": "1",
                "ChannelType": "1",
                "KeyWord": "",
                "KeyWordType": "",
                "chanId": "578",
                "order": "1"
            }
        headers = {"Referer": self.url} if self.url else {}
        yield self.FormRequest(
            url,
            formdata=data,
            method="POST",
            headers=headers,
            callback=self.gen_request_by_get_documents_mid,
            cb_kwargs={
                "params": params,
                "data": data,
                "callback": callback,
                "auto_next": auto_next,
                "url": url,
                "headers": headers,
                "parse_response": parse_response,
            },
        )

    def gen_request_by_get_documents_mid(self, response, **kwargs):
        """First-page handler for :meth:`gen_request_by_get_documents`:
        read ``pageCount`` and schedule the remaining pages."""
        data = kwargs.get("data")
        callback = kwargs.get("callback")
        auto_next = kwargs.get("auto_next")
        url = kwargs.get("url")
        headers = kwargs.get("headers")
        parse_response = kwargs.get("parse_response")
        total_page = response.json()['pageCount']
        if not parse_response:
            parse_response = self.parse_response_by_get_documents
        self.logger.info(f"开始爬取 第 0 页/ 共 {total_page} 页 data: {data}")
        yield from parse_response(response, callback=callback)
        if total_page and auto_next:
            total_page = int(total_page)
            # The +2 upper bound requests one extra page as a safety margin.
            for i in range(1, total_page + 2):
                data = {**data, "pageIndex": f"{i}"}
                self.logger.info(f"开始爬取 第 {i} 页 data: {data}")
                yield self.FormRequest(
                    url,
                    formdata=data,
                    method="POST",
                    headers=headers,
                    callback=parse_response,
                    cb_kwargs={
                        "callback": callback,
                    },
                )

    def parse_response_by_get_documents(self, response, **kwargs):
        """Yield a request per entry in the JSON ``list`` payload (skipping PDFs).

        Relative URLs are resolved against ``self.url`` before following.

        :raises Exception: when the response contains no entries at all.
        """
        callback = kwargs.get("callback")
        url_list = response.json()['list']
        self.logger.info(f"获取列表数量 {len(url_list)}")
        if not url_list:
            raise Exception("没有获取到url", response.text)
        for idx, item in enumerate(url_list):
            href = item['url']
            self.logger.info(f"获取列表序列 {idx}/{len(url_list)} {href}")
            if href and not href.endswith(".pdf"):
                href = urllib.parse.urljoin(self.url, href)
                yield response.follow(href, callback=callback)
