import random
import re
import time
import traceback
import uuid
from typing import Union

import requests
from requests.cookies import RequestsCookieJar

from sam import import_csv_path
from sam.util import csvUtil
from sam.util.beanUtil import replace_dict_none_properties
from sam.util.crawler.lite.spider import BaseSpider
from sam.util.crawler.demo.qcc_cookies import get_random_cookie
from sam.util.dateUtil import get_now_month_day_str
from sam.util.dbUtil import DB, DEFAULT_PROCESS_DB_CONNECTION_CONFIG
from sam.util.httpUtil import get_url_encode_str
from sam.util.strUtil import pre_handle_str_save_space, base_filter_global_symbol_re, pre_handle_str
from sam.wrapper.CommonWrapper import print_task_cost_time

# Column headers for the fields scraped from a qichacha company detail page.
# (The "filed" spelling is kept as-is since external code may import this name.)
qcc_crawl_filed_list = [
    '企查查名称',
    '法人',
    '注册资本',
    '实缴资本',
    '状态',
    '成立日期',
    '统一社会信用代码',
    '纳税人识别号',
    '注册号',
    '组织机构代码',
    '发证日期',
    '公司类型',
    '所属行业',
    '发证机关',
    '区域信息',
    '英文名称',
    '曾用名',
    '医保人数',
    '公司规模',
    '营业期限',
    '地址信息',
    '经营范围',
    '分支机构',
]

# Positional units for decimal digits, least-significant first offset:
# index = (digit count - 1 - position). Supports at most 5-digit numbers (up to 万).
unit_list = [
    "",
    "十",
    "百",
    "千",
    "万",
]

# Chinese numerals for the digits 0-9, indexed by digit value.
text_list = [
    "零",
    "一",
    "二",
    "三",
    "四",
    "五",
    "六",
    "七",
    "八",
    "九",
]


def transfer_digital_2_text(num_str: str) -> str:
    """Convert a decimal digit string (1-5 digits, i.e. up to 万) to Chinese numerals.

    Examples: "153" -> "一百五十三", "10" -> "十", "203" -> "二百零三",
    "1001" -> "一千零一".

    Fix over the original: runs of interior zeros now collapse into a single
    "零" (the original produced "一千零零一" for "1001").

    :param num_str: string of ASCII digits; must contain only '0'-'9'
    :return: the Chinese-numeral reading of the number
    """
    max_size = len(num_str)
    parts = []
    for i, c in enumerate(num_str):
        if c == "0":
            # Interior zeros read as a single "零"; never emit it twice in a row.
            if not parts or parts[-1] != "零":
                parts.append("零")
            continue
        parts.append(text_list[int(c)])
        parts.append(unit_list[max_size - 1 - i])
    # Drop a trailing "零" (e.g. "10" -> "一十零" -> "一十"), but keep a lone "零".
    if len(parts) > 1 and parts[-1] == "零":
        parts.pop()
    res = "".join(parts)
    # Conventional reading: "一十X" is spoken "十X".
    if "一十" in res:
        return res.replace("一十", "十")
    return res


class QccSpider(BaseSpider):
    """Spider for company registration data on qichacha.com (企查查).

    Search pages ("search" in the URL) are parsed into either follow-up
    detail URLs or "name, address" result strings; detail pages are parsed
    into a flat dict of registration fields.
    """

    def __init__(self):
        super().__init__(name="qcc", level="debug")
        # self.proxy = WmProxy()
        # Registration statuses considered "active"; anything else is treated
        # as closed/deregistered by the result-chooser helpers below.
        self.ok_status = [
            "存续", "在业", "迁入", "筹建"
        ]
        self.filter_re = re.compile('[",]+')

    def prepare_cookies(self) -> Union[dict, RequestsCookieJar]:
        """Build a RequestsCookieJar from a randomly chosen harvested cookie set."""
        cookie_index, qcc_cookies = get_random_cookie()
        self.logger.debug(f"当前使用的cookie是: {cookie_index} ")
        cookie_jar = requests.cookies.RequestsCookieJar()
        for cookie in qcc_cookies:
            cookie_jar.set(
                name=cookie["name"]
                , value=cookie["value"]
                , domain=cookie["domain"]
                , path=cookie["path"]
            )
        return cookie_jar

    def prepare_proxies(self) -> dict:
        """Proxying is currently disabled; implicitly returns None."""
        # return self.proxy.get_proxies()
        pass

    # Parse a fetched page into follow-up URLs / result strings.
    def parse_url(self, url, soup) -> list:
        # return self.parser_url_4_smart_choose_module(url, soup)
        return self.parse_url_4_simple_module_4_list(url, soup)

    # Simple mode: follow the first search hit's detail link.
    @staticmethod
    def parse_url_4_simple_module(url, soup) -> list:
        if "search" in url:
            detail_url = soup.select(r'#search-result > tr > td:nth-child(3) > a')
            if detail_url:
                return [f"https://www.qichacha.com{detail_url[0]['href']}"]

    # Simple mode: return the first search hit as one "name, address" string.
    def parse_url_4_simple_module_4_list(self, url, soup) -> list:
        if "search" in url:
            search_result_list = soup.select(r'#search-result tr')
            if search_result_list:
                name = self.select(search_result_list[0], "td:nth-child(3) > a")
                # search-result > tr:nth-child(1) > td:nth-child(3) > p:nth-child(5)
                address = self.select(search_result_list[0], "td:nth-child(3) > p:nth-child(5)")
                if name:
                    return [f"{name}, {address}"]

    # Smart mode: pick the search hit whose name (or former name) matches.
    def parser_url_4_smart_choose_module(self, url, soup) -> list:
        if "search" in url:
            search_keywords = soup.select('input#headerKey')[0].attrs["value"]
            search_result_list = soup.select(r'#search-result tr')
            if search_result_list:
                new_url = self.choose_search_result(search_result_list, search_keywords)
                if new_url:
                    return [new_url]

    def choose_search_result(self, search_result_list: list, search_keywords: str) -> str:
        """Collect hits matching the keywords by name or former name, then pick one.

        :return: absolute detail-page URL, or None when nothing matches
        """
        maybe_match_list = []
        for search_result in search_result_list:
            name, status, alias, url = self.parse_search_result_info(search_result)
            if alias and alias == search_keywords:
                maybe_match_list.append((name, status, url, alias))
            if self.assert_name_is_same(search_keywords, name):
                maybe_match_list.append((name, status, url, alias))
        if maybe_match_list:
            return self.choose_order(maybe_match_list, search_keywords)

    def parse_search_result_info(self, search_result):
        """Extract (name, status, former-name alias, detail url) from a result row."""
        name = self.select(search_result, "td:nth-child(3) > a")
        status = self.select(search_result, "td:nth-child(4) > span")
        link_info = self.select(search_result, "td:nth-child(3) > p:nth-child(6)")
        url = search_result.select('td:nth-child(3) > a')[0]['href']
        alias = None
        if link_info and "曾用名:" in link_info:
            # assumes "曾用名:" (4 chars) prefixes the text — TODO confirm against markup
            alias = link_info[4:]
        return name, status, alias, url

    def assert_name_is_same(self, original_name: str, target_name: str) -> bool:
        """True when target_name equals original_name after symbol filtering."""
        pre_target_name = self._pre_handle_str(target_name)
        return original_name == pre_target_name

    @staticmethod
    def choose_order(maybe_match_list: list, search_keywords: str) -> str:
        """
        Pick by priority: exact current-name match, then former-name match.
        :return: absolute detail-page URL, or None when no exact match exists
        """
        for maybe_match in maybe_match_list:
            if maybe_match[0] == search_keywords or (maybe_match[3] and maybe_match[3] == search_keywords):
                return f"https://www.qichacha.com{maybe_match[2]}"

    # Deregistered companies: return nothing, only log the skipped match.
    def abandon_closed_status(self, maybe_match_list: list, search_keywords: str) -> str:
        for maybe_match in maybe_match_list:
            if maybe_match[1] in self.ok_status:
                return f"https://www.qichacha.com{maybe_match[2]}"
            else:
                self.logger.error(f" ['search_keywords': {search_keywords}"
                                  f"; 'match_name': {maybe_match[0]}"
                                  f"; 'status': {maybe_match[1]}"
                                  f"; 'detail_url': {maybe_match[2]}]")

    # Deregistered companies: fall back to the first hit when none is active.
    def save_closed_status(self, maybe_match_list: list, search_keywords: str) -> str:
        for maybe_match in maybe_match_list:
            if maybe_match[1] in self.ok_status:
                return f"https://www.qichacha.com{maybe_match[2]}"
        else:
            self.logger.info(f" 'search_keywords': {search_keywords} 搜索结果中 只有注销状态, 这里也将返回注销状态的搜索结果 ")
            return f"https://www.qichacha.com{maybe_match_list[0][2]}"

    def parse_data(self, url, soup) -> dict:
        """Parse a company detail page into a field dict; search pages yield None.

        Commas and newlines in free-text fields are normalized because the
        results are later written to CSV-like output.
        """
        if "search" not in url:
            pre_address = self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(10) > td:nth-child(2)")
            address = pre_address.split("\n")[0].strip() if pre_address else None
            address = address.replace(",", "、") if address and "," in address else address
            pre_branch_org = self.select(soup, "#Subcom > table")
            if pre_branch_org:
                # mask digit runs and strip all whitespace from the branch table text
                pre_branch_org = re.sub(r"\d+", "*", pre_branch_org).strip()
                branch_org = re.sub(r"\s", "", pre_branch_org)
            else:
                branch_org = None
            business_scope = self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(11) > td:nth-child(2)")
            business_scope = business_scope.replace("\n", " ") \
                if business_scope and "\n" in business_scope else business_scope
            business_scope = business_scope.replace(",", "、") \
                if business_scope and "," in business_scope else business_scope
            return {
                "name": self.select(soup, ".content  h1"),
                "legal_repr": self.select(
                    soup,
                    "#Cominfo > table:nth-child(3) tr:nth-child(2) > td.ma_left > div > div.clearfix > "
                    "div:nth-child(2) > a > h2"),
                "register_capital": self.select(
                    soup,
                    "#Cominfo > table:nth-child(4) tr:nth-child(1) > td:nth-child(2)"),
                "real_capital": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(1) > td:nth-child(4)"),
                "status": self.select(soup, ".content > div.row.tags > span:nth-child(1)"),
                "register_date": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(2) > td:nth-child(4)"),
                "uniform_social_credit_code": self.select(
                    soup,
                    "#Cominfo > table:nth-child(4) tr:nth-child(3) > td:nth-child(2)"),
                "taxpayer_identification_code": self.select(
                    soup,
                    "#Cominfo > table:nth-child(4) tr:nth-child(3) > td:nth-child(4)"),
                "registration_code": self.select(
                    soup,
                    "#Cominfo > table:nth-child(4) tr:nth-child(4) > td:nth-child(2)"),
                "organization_code": self.select(
                    soup,
                    "#Cominfo > table:nth-child(4) tr:nth-child(4) > td:nth-child(4)"),
                "confirm_date": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(6) > td:nth-child(2)"),
                "company_type": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(5) > td:nth-child(2)"),
                "business_type": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(5) > td:nth-child(4)"),
                "confirm_org": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(6) > td:nth-child(4)"),
                "province": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(7) > td:nth-child(2)"),
                "english_name": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(7) > td:nth-child(4)"),
                "ago_name": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(8) > td:nth-child(2)"),
                "insurance_numbers": self.select(
                    soup,
                    "#Cominfo > table:nth-child(4) tr:nth-child(8) > td:nth-child(4)"),
                "company_numbers": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(9) > td:nth-child(2)"),
                "business_period": self.select(soup, "#Cominfo > table:nth-child(4) tr:nth-child(9) > td:nth-child(4)"),
                "address": address,
                "business_scope": business_scope,
                "branch_org": branch_org,
                "url": url
            }

    @staticmethod
    def select(soup, css_rule) -> str:
        """Return the cleaned text of the first node matching css_rule.

        Placeholder "-" cells and missing nodes both yield None.
        """
        select_list = soup.select(css_rule)
        if select_list:
            content = select_list[0].text
            if content:
                content = pre_handle_str_save_space(content)
                if content == "-":
                    content = None
                return content

    def output(self):
        """Collapse the accumulated data list down to its first entry."""
        if self.data_list:
            self.data_list = self.data_list[0]

    @staticmethod
    def _pre_handle_str(s: str):
        """Normalize a name using the global symbol filter regex."""
        return pre_handle_str(s, filter_re=base_filter_global_symbol_re)

    def crawl_qcc_by_name(self, client_name: str) -> dict:
        """Search by company name; on a miss, retry once with digit runs spelled
        out in Chinese (e.g. "…153店" -> "…一百五十三店").

        :return: the first crawl result dict, or None
        """
        try:
            # random pause to throttle requests
            t = random.randint(1, 3)
            time.sleep(t)
            client_name = pre_handle_str(client_name)
            another_client_name = None
            if re.search(r"\d+", client_name):
                digital_num = re.sub(r"\D+", "", client_name)
                digital_text = transfer_digital_2_text(digital_num)
                another_client_name = client_name.replace(digital_num, digital_text)
            client_name_encode = get_url_encode_str(client_name)
            begin_url = f'https://www.qichacha.com/search?key={client_name_encode}'
            crawl_result = self.query_once(begin_url)
            self.logger.info(f" {client_name} 的爬取结果是: {crawl_result}")
            if crawl_result:
                return crawl_result[0]
            else:
                if another_client_name:
                    return self.crawl_qcc_by_name(another_client_name)
        except Exception as e:
            self.logger.error(f"抓取 {client_name}  出现异常: {e}")
            # fix: print_exc() returns None; format_exc() yields the traceback text
            self.logger.error(traceback.format_exc())

    def crawl_qcc_by_name_only_url(self, client_name: str):
        """Search by company name and return only the first list-mode result."""
        try:
            # random pause to throttle requests
            t = random.randint(1, 3)
            time.sleep(t)
            # client_name = pre_handle_str(client_name)
            client_name_encode = get_url_encode_str(client_name)
            begin_url = f'https://www.qichacha.com/search?key={client_name_encode}'
            crawl_result = self.query_list(begin_url)
            if crawl_result:
                # self.logger.info(f" {client_name} 的爬取结果是: {crawl_result[0]}")
                return crawl_result[0]
        except Exception as e:
            self.logger.error(f"抓取 {client_name}  出现异常: {e}")
            # fix: print_exc() returns None; format_exc() yields the traceback text
            self.logger.error(traceback.format_exc())

    def crawl_qcc_by_url(self, info_url: str) -> dict:
        """Crawl a detail-page URL directly; returns None (after a short back-off)
        on an empty result or an exception."""
        try:
            crawl_result = self.query_once(info_url, optional="byUrl")
            if crawl_result:
                return crawl_result[0]
            else:
                t = random.randint(5, 9)
                time.sleep(t / 10)
        # certain errors (e.g. "index out of list range") are retryable upstream
        except Exception as e:
            self.logger.error(f"抓取 {info_url}  出现异常: {e}")
            t = random.randint(5, 9)
            time.sleep(t / 10)


@print_task_cost_time
def main():
    """Look up each '海王星辰 <name>' from the import CSV on qichacha and print the result."""
    spider = QccSpider()
    rows = csvUtil.csv_read(import_csv_path)
    for row in rows:
        search_name = f"海王星辰 {row[0]}"
        crawl_result2 = spider.crawl_qcc_by_name_only_url(search_name)
        if crawl_result2:
            print(f"{search_name},{crawl_result2[0]}")
        else:
            print(f"{search_name},在企查查没有找到")


class CrawlAndInsert(object):
    """Crawls qichacha company data and upserts rows into the qcc_address table."""

    def __init__(self, flag: str):
        super().__init__()
        self.crawler = QccSpider()
        self.db = DB(config=DEFAULT_PROCESS_DB_CONNECTION_CONFIG)
        # month-day stamp folded into each row's "source" tag
        self.ago_prefix = get_now_month_day_str()
        self.flag = flag

    def insert_process(self, item_list: list):
        """Batch insert-or-update items into qcc_address; uuid is excluded from updates."""
        if item_list:
            self.db.batch_update_by_item_list(
                item_list
                , "qcc_address"
                , optional="InsertOrUpdate"
                , exclude_properties=["uuid"]
                # , is_only_print_sql=True
            )
        else:
            self.db.logger.error("没有要入库的数据")

    def convert_crawl_result(self, item) -> dict:
        """Map a spider result dict onto qcc_address column names, adding uuid/source."""
        return {
            "uuid": f"{uuid.uuid1()}",
            "name": item.get('name', ''),
            "register_capital": item.get('register_capital', ''),
            "legal_repr": item.get('legal_repr', ''),
            "register_date": item.get('register_date', ''),
            "status": item.get('status', ''),
            "addr": item.get('address', ''),
            "uniform_social_credit_code": item.get('uniform_social_credit_code', ''),
            "taxpayer_identification_code": item.get('taxpayer_identification_code', ''),
            "registration_code": item.get('registration_code', ""),
            "organization_code": item.get('organization_code', ""),
            "company_type": item.get('company_type', ""),
            "business_type": item.get('business_type', ""),
            "province": item.get('province', ""),
            "business_scope": item.get('business_scope', ""),
            "source": f"{self.flag}-{self.ago_prefix}-sam-qcc"
        }

    def crawl_by_url(self):
        """Crawl a fixed list of detail-page URLs and persist the results."""
        url_list = [
            "https://www.qichacha.com/firm_f79da1ef91a8688eec499ea02eae59cc.html"
        ]
        item_list = []
        for url in url_list:
            crawl_result2 = self.crawler.crawl_qcc_by_url(url)
            # fix: crawl_qcc_by_url returns None on failure — guard before converting
            if crawl_result2 and crawl_result2.get("name", None):
                new_item = self.convert_crawl_result(crawl_result2)
                new_item = replace_dict_none_properties(new_item, "null")
                item_list.append(new_item)
            else:
                self.db.logger.info(f"{url} 没有找到结果")
        self.insert_process(item_list)

    def crawl_by_name(self, is_read_file: bool = True):
        """Crawl by company names from the import CSV (default) or a hard-coded list.

        fix: the parameter used to be unconditionally overwritten to True inside
        the body; the override is removed and the default flipped to True so the
        previous effective behavior is preserved while the flag works again.
        """
        if is_read_file:
            lines = csvUtil.csv_read(import_csv_path)
            name_list = [line[0] for line in lines]
        else:
            name_list = [
                "大竹县四合乡达州市天泰同安药房连锁有限公司421连锁店"
            ]
        item_list = []
        for name in name_list:
            crawl_result2 = self.crawler.crawl_qcc_by_name(name)
            if crawl_result2 and crawl_result2.get("name", None):
                new_item = self.convert_crawl_result(crawl_result2)
                new_item = replace_dict_none_properties(new_item, "null")
                item_list.append(new_item)
            else:
                self.db.logger.info(f"{name} 没有找到结果")
        self.insert_process(item_list)


def demo_insert():
    """Run the crawl-and-persist pipeline tagged with the yb-lx-1904 batch flag."""
    CrawlAndInsert("yb-lx-1904").crawl_by_name()


def demo_print():
    """Look up each name from the import CSV on qichacha and print the match (or a miss marker)."""
    spider = QccSpider()
    for line in csvUtil.csv_read(import_csv_path):
        search_name = line[0]
        crawl_result2 = spider.crawl_qcc_by_name_only_url(search_name)
        if crawl_result2:
            print(f"{search_name},{crawl_result2[0]}")
        else:
            print(f"{search_name},在企查查没有找到")


# Script entry point: crawl each CSV name on qichacha and print the results.
if __name__ == "__main__":
    main()
