#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2024/2/6 10:39
# @Author  : 王凯
# @File    : std_main.py
# @Project : scrapy_spider
import concurrent.futures
import hashlib
import re
import sys
from pathlib import Path
from urllib.parse import urljoin

import parsel
import pypinyin
import requests
from jinja2 import Template

from loguru import logger
import warnings

from project_setting import BASE_DIR

# Suppress all warnings — presumably to silence urllib3's InsecureRequestWarning
# triggered by the verify=False requests below (TODO confirm; a targeted
# filter on InsecureRequestWarning would be less blunt).
warnings.filterwarnings("ignore")
from apps.tax_policy.tax_policy.std_template.config_spider_entry import Stage
from utils.db.mysqldb import MysqlDB


class StdMain:
    """Pipeline that standardises tax-policy list URLs and generates scrapy spiders.

    Workflow (see :meth:`run`):
      1. ``run_fill_spider_name_and_std_url`` — derive a unique spider name and a
         normalised, reachable ``start_url`` for every source row.
      2. ``run_fill_list_stages`` — probe each ``start_url`` against the known
         list/detail xpath templates and persist the matching stages; for pages
         that match none, render a fallback spider file from a Jinja2 template.
    """

    # Shared DB handle and HTTP session — class-level, shared by all instances.
    db_api = MysqlDB()
    session = requests.Session()
    # Kept as a class attribute too, so external code can reuse the headers.
    headers = {
        "Accept": "*/*",
        "Accept-Language": "zh-CN,zh;q=0.9,zu;q=0.8,be;q=0.7,en;q=0.6",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
    }
    session.headers = headers

    def get_datas(self):
        """Return all template rows for the configured province/city as dicts."""
        sql = "select * from template_spider_tax_policy where province = '河南省' and city = '平顶山市'"
        return self.db_api.find(sql, to_json=True)

    def gen_spider_name(self, item):
        """Fill ``item`` with a spider ``name`` and a normalised ``start_url``.

        The name is ``henan_<pinyin initials of source>_<md5(url)[:4]>_policy``;
        the 4-char url hash keeps names unique across sources whose pinyin
        initials collide. Returns the mutated ``item``.
        """
        base_path = "henan"
        source = item["source"]
        url = item["url"]
        # First letter of each character's pinyin; drop full-width parentheses.
        source = (
            "".join([j[0][0] for j in pypinyin.pinyin(source, style=pypinyin.NORMAL)]).replace("（", "").replace("）", "")
        )
        hash_data = hashlib.md5(url.encode("utf-8")).hexdigest()[:4]

        spider_name = "{base_path}_{source}_{hash}_policy".format(base_path=base_path, source=source, hash=hash_data)
        item["name"] = spider_name
        # NOTE(review): check_whole_url is deliberately kept nested/double here as in
        # the original — the second pass can follow a "more" link discovered on the
        # page the first pass resolved to. Confirm whether a single call suffices.
        item["start_url"] = self.check_whole_url(self.check_whole_url(url))
        logger.info("-" * 100)
        logger.debug(item["url"])
        logger.debug(item["start_url"])
        return item

    def _request_status_code(self, url, **kwargs):
        """GET ``url`` and return ``(status_code, resolved_url)``.

        If the fetched page contains a "more" link (an ``<a class="more">`` or an
        anchor whose text is 更多), the returned url is that link resolved against
        the response url — i.e. a landing page is upgraded to its full list page.
        Any failure (timeout, TLS, decode error, ...) is reported as 500.
        """
        try:
            resp = self.session.get(url, timeout=5, verify=False)
            response = parsel.Selector(text=resp.content.decode())
            more_url = (
                    response.xpath('//a[@class="more"]/@href').get() or response.xpath('//a[text()="更多"]/@href').get()
            )
            if more_url:
                url = urljoin(resp.url, more_url)
            return resp.status_code, url
        except Exception as e:
            # Best effort: log via loguru (previously a bare print) and fall through.
            logger.warning(e)
        return 500, url

    def check_whole_url(self, url):
        """Normalise ``url`` into a concrete, reachable list-page URL.

        Strategy, in order:
          * ``.htm``/``.shtm`` urls: request as-is; on 404 retry with the
            suffix swapped (``.htm`` <-> ``.shtm``).
          * Directory-style urls (trailing ``/``): probe the usual index page
            names until one does not 404.
          * Anything else: try the ``..._1`` "page one" convention.
        Falls back to the original ``url`` when every probe 404s.
        """
        if ".htm" in url:
            status_code, new_url = self._request_status_code(url)
            if status_code == 404:
                status_code, new_url = self._request_status_code(url.replace(".htm", ".shtm"))
            return new_url

        if ".shtm" in url:
            status_code, new_url = self._request_status_code(url)
            if status_code == 404:
                status_code, new_url = self._request_status_code(url.replace(".shtm", ".htm"))
            return new_url

        if url.endswith("/"):
            # Probe the common index page names in order of likelihood.
            for index_name in ("index.html", "index.shtml", "index.htm", "index.shtm"):
                status_code, new_url = self._request_status_code(f"{url}{index_name}")
                if status_code != 404:
                    return new_url
        elif not url.endswith("_1"):
            # Some sites expose page one as ``<url>_1``.
            status_code, new_url = self._request_status_code(f"{url}_1")
            if status_code != 404:
                return new_url
        return url

    def run_fill_spider_name_and_std_url(self):
        """Concurrently derive name/start_url for every row and persist both."""
        datas = self.get_datas()
        results = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            tasks = [executor.submit(self.gen_spider_name, item) for item in datas]
            # Collect inside the ``with`` so failures surface before shutdown.
            for task in concurrent.futures.as_completed(tasks):
                results.append(task.result())

        self.db_api.add_batch_smart("template_spider_tax_policy", results, update_columns=["start_url", "name"])

    def run_fill_list_stages(self):
        """Match every unfinished source against list/detail templates and persist.

        Rows with ``state == 1`` (already handled) are skipped. A source that
        matches both a list and a detail template gets its ``stages`` saved;
        a dead start url (404) gets ``state = -1`` recorded.
        """
        datas = self.get_datas()
        datas = [i for i in datas if i["state"] != 1]
        list_templates = self.db_api.find("select * from template_spider_stages where is_list = 1", to_json=True)
        detail_templates = self.db_api.find("select * from template_spider_stages where is_list = 0", to_json=True)
        for src_data in datas:
            data = self.run_list_stages(src_data.copy(), list_templates)
            data = self.run_detail_stages(data, detail_templates)
            if "list_stages" in data and "detail_stages" in data:
                list_stages = Stage(**data["list_stages"]).dict()
                detail_stages = Stage(**data["detail_stages"]).dict()
                stages = [list_stages, detail_stages]
                save_item = {**src_data, "start_stage": "list", "stages": stages}
                self.db_api.add_smart(
                    "template_spider_tax_policy", save_item, update_columns=["stages", "start_stage"]
                )
            if data['state'] == -1:
                save_item = {**src_data, "state": data['state']}
                # BUGFIX: previously ``data`` (carrying transient keys such as
                # ``detail_list``/``list_stages``) was saved instead of the
                # clean ``save_item`` that was built for exactly this purpose.
                self.db_api.add_smart(
                    "template_spider_tax_policy", save_item, update_columns=["state"]
                )
            logger.info("*" * 100)

    def run_detail_stages(self, item, stages):
        """Probe detail-stage templates against sample detail pages.

        Samples urls from the middle of ``item["detail_list"]`` (de-duplicated),
        fetches each, and keeps the first template that extracts a non-empty
        ``title`` and ``publish_date``. On success sets ``item["detail_stages"]``.
        """
        if "detail_list" in item:
            detail_list = item["detail_list"]
            # De-duplicate dicts by hashing their item tuples.
            detail_list = [dict(t) for t in {tuple(d.items()) for d in detail_list}]
            mid_idx = len(detail_list) // 2
            detail_mid_url_list = detail_list[mid_idx:]
            for detail_mid_url_dict in detail_mid_url_list:
                detail_mid_url = detail_mid_url_dict["source_url"]
                try:
                    detail_response = self.session.get(detail_mid_url, timeout=5, verify=False)
                    init_text = detail_response.content.decode()
                    for stage in stages:
                        selector = parsel.Selector(text=init_text)
                        _item = {"url": detail_mid_url}
                        for field in stage["fields"]:
                            _tmp_xpath_rules = field["xpath"] + ("/@" + field["attr"] if field["attr"] else "")
                            _tmp_field = selector.xpath(_tmp_xpath_rules)
                            if field["re_first"]:
                                _tmp_field = _tmp_field.re_first(field["re_first"])
                            else:
                                _tmp_field = _tmp_field.get()
                            if field["name"] == "content":
                                # Content is extracted by the real spider; only metadata is probed here.
                                continue
                            _item[field["name"]] = (_tmp_field or "").strip()
                        if not _item["title"] or not _item["publish_date"]:
                            logger.warning(f"{_item} {stage}")
                        else:
                            logger.debug(f"{_item} {stage}")
                            item["detail_stages"] = stage
                            return item
                    else:
                        # No detail template matched this page.
                        logger.error(detail_mid_url)
                except Exception as e:
                    logger.error(e)
        return item

    def _render_spider_file(self, item, template_name, sub_dir="henan/cache", strip_underscore=False):
        """Render Jinja2 template ``template_name`` into ``spiders/<sub_dir>/<name>.py``.

        ``strip_underscore`` removes underscores from the generated class name
        (used by the simple-spider fallback). Uses ``read_text`` so the template
        file handle is closed (the previous ``open(...).read()`` leaked it).
        """
        template_path = Path(__file__).parent.parent / "template" / template_name
        template = Template(template_path.read_text(encoding="utf-8"))
        function_name = item['name'].title()
        if strip_underscore:
            function_name = function_name.replace("_", "")
        out_path = Path(f"{BASE_DIR}/apps/tax_policy/tax_policy/spiders/{sub_dir}") / f"{item['name']}.py"
        with open(out_path, "w", encoding="utf-8") as f:
            f.write(template.render(**item, function_name=function_name))

    def run_list_stages(self, item, stages):
        """Probe ``item["start_url"]`` against the list-stage templates.

        On a template match, ``item`` gains ``list_stages`` plus a sampled
        ``detail_list`` and is returned immediately. A 404 start url sets
        ``state = -1``. When no template matches, a fallback spider file is
        rendered, chosen by fingerprints found in the page source.
        """
        # Normalise descriptive columns so templates never render ``None``.
        for k, v in item.items():
            if k in ["name", "province", "city", "county", "park", "source", "url"]:
                item[k] = v or ''
        try:
            init_response = self.session.get(item["start_url"], timeout=5, verify=False)
            if init_response.status_code == 404:
                logger.warning(f"{item['start_url']} 404")
                item["state"] = -1
                return item
            init_text = init_response.content.decode()
            logger.info("-" * 100)
            for stage in stages:
                selector = init_selector = parsel.Selector(text=init_text)
                if stage["page_loop_xpath"]:
                    selector = selector.xpath(f'{stage["page_loop_xpath"]}')
                if stage["page_loop_re_first"]:
                    selector = selector.re_first(rf'{stage["page_loop_re_first"]}')
                if isinstance(selector, str):
                    # ``selector`` now holds the total item/page count as text.
                    msg = f"{item['start_url']} page_num = {selector}"
                    item["list_stages"] = stage
                    if stage["page_loop_sub_src"] not in item["start_url"]:
                        # The pagination pattern must appear (literally or as a
                        # regex) in the start url for this template to apply.
                        if not re.findall(rf"{stage['page_loop_sub_src']}", item["start_url"]):
                            continue
                    if int(selector) > 2:
                        # Rewrite the url to a middle page to verify the
                        # pagination pattern actually resolves.
                        new_url = re.sub(
                            rf"{stage['page_loop_sub_src']}",
                            rf"{stage['page_loop_sub_target'].format((int(selector) - 1) // stage['page_loop_page_size'])}",
                            item["start_url"],
                        )
                    else:
                        new_url = item["start_url"]
                    status_code, new_url = self._request_status_code(new_url)
                    list_datas = init_selector.xpath(stage["list_xpath"])
                    msg += f" next_url {new_url} next_status_code {status_code} list_num {len(list_datas)}"
                    logger.success(msg + f'\n {item["list_stages"]}')
                    logger.info("-" * 100)
                    detail_list = []
                    for i in list_datas:
                        _item = {}
                        for field in stage["fields"]:
                            _tmp_xpath_rules = field["xpath"] + ("/@" + field["attr"] if field["attr"] else "")
                            _tmp_field = i.xpath(_tmp_xpath_rules)
                            if field["re_first"]:
                                _tmp_field = _tmp_field.re_first(field["re_first"])
                            else:
                                _tmp_field = _tmp_field.get()
                            _item[field["name"]] = (_tmp_field or "").strip()
                            if "url" in field["name"]:
                                # Resolve relative detail links against the start url.
                                _tmp_field = urljoin(item["start_url"], _tmp_field)
                                _item[field["name"]] = _tmp_field
                        detail_list.append(_item)
                    item["detail_list"] = detail_list
                    logger.info("-" * 100)
                    return item
            else:
                # No list template matched: generate a spider file from a Jinja2
                # template selected by fingerprints in the page source.
                if "parseType" in init_text:
                    logger.success(f"create spider {item['name']}.py")
                    self._render_spider_file(item, "policy_unit_query_data_spider.templ", sub_dir="henan")
                    item['state'] = 1
                    self.db_api.add_smart(
                        "template_spider_tax_policy", item, update_columns=["state"]
                    )
                elif "script.json" in init_text:
                    logger.success(f"create spider {item['name']}.py")
                    self._render_spider_file(item, "policy_script_json_data_spider.templ")
                elif "https://user.pds.gov.cn/api/Outer/T?parms=opt/dynamic/" in init_text:
                    logger.success(f"create spider {item['name']}.py")
                    self._render_spider_file(item, "policy_script_dynamic_spider.templ")
                else:
                    self._render_spider_file(item, "policy_simple_spider.templ", strip_underscore=True)
                    logger.error(f"{item['start_url']} {item.get('list_stages') or ''}")
        except Exception as e:
            logger.exception(e)
            # Last resort: still emit a simple spider skeleton for manual fixing.
            self._render_spider_file(item, "policy_simple_spider.templ", strip_underscore=True)

        return item

    def run(self):
        """Entry point; step 1 (name/url filling) is toggled manually."""
        # self.run_fill_spider_name_and_std_url()
        self.run_fill_list_stages()


if __name__ == "__main__":
    # Run the full pipeline when executed as a script.
    StdMain().run()
