import datetime
import json
import re
from time import sleep
from urllib.parse import quote

from colorama import Fore
from tqdm import tqdm

from export.export import export_to_excel, export_to_json
from lib.request import get_req
from utils.log import logger


def get_pid_by_keyword(name, t=0) -> dict:
    """Look up a company by keyword on aiqicha and return the matched item.

    Args:
        name: company name / keyword to search for.
        t: search type forwarded as the ``t`` query parameter
           (previously accepted but ignored — the URL hard-coded ``t=0``).

    Returns:
        The first matching result item (dict carrying ``pid`` / ``entName``),
        or None when nothing could be extracted from the page.
    """
    url_prefix = 'https://www.baidu.com/'
    # quote() the keyword so Chinese characters and spaces survive URL
    # concatenation; use the caller-supplied search type.
    url = 'https://aiqicha.baidu.com/s?q=' + quote(name) + '&t=' + str(t)
    content = get_req(url, url_prefix, False)
    return parse_index(content)


def parse_index(content):
    """Extract the first search-result item from an aiqicha search page.

    The page embeds its data as ``window.pageData = {...};`` inside a
    <script> tag; the JSON blob between the two markers is sliced out,
    cleaned and parsed.

    Returns:
        The first entry of ``result.resultList`` (dict with ``pid`` /
        ``entName``), or None when nothing usable could be extracted.
    """
    tag_1 = 'window.pageData ='
    tag_2 = '/* eslint-enable */</script><script data-app'
    idx_1 = content.find(tag_1)
    idx_2 = content.find(tag_2)
    # Both markers must exist and be in the expected order.
    if idx_1 != -1 and idx_2 > idx_1:
        my_str = content[idx_1 + len(tag_1): idx_2].strip()
        my_str = my_str.replace("\n", "")
        my_str = my_str.replace("window.isSpider = null;", "")
        my_str = my_str.replace("window.updateTime = null;", "")
        # NOTE(review): stripping every space would also mangle JSON string
        # values containing spaces; kept because the site currently emits
        # the payload without spaces — confirm if extraction breaks.
        my_str = my_str.replace(" ", "")
        # endswith() is safe on an empty string, unlike my_str[len-1].
        if my_str.endswith(';'):
            my_str = my_str[:-1]
            j = json.loads(my_str)
            result = j.get('result', {})
            if 'resultList' not in result:
                # Was exit(0), which terminated the whole process from a
                # parsing helper; report and let the caller decide.
                print('该关键字无法查询')
                return None
            if result['resultList']:
                return result['resultList'][0]
            # Empty result list: returning the bare result dict (old
            # behaviour) crashed callers that expect a 'pid' key.
            return None
    logger.error("【关键词数据提取失败】 {}".format(idx_1))
    return None


def parse_detail(content):
    """Extract the ``result`` object embedded in a company detail page.

    Same embedding scheme as parse_index: the payload sits between
    ``window.pageData =`` and the closing script marker.

    Returns:
        The parsed ``result`` dict, or '' when extraction fails
        ('' is kept for backward compatibility with existing callers).
    """
    tag_1 = 'window.pageData ='
    tag_2 = '/* eslint-enable */</script><script data-app'
    idx_1 = content.find(tag_1)
    idx_2 = content.find(tag_2)
    # Require both markers: idx_1 == -1 with a found idx_2 previously
    # produced a garbage slice that blew up in json.loads.
    if idx_1 == -1 or idx_2 <= idx_1:
        return ''
    my_str = content[idx_1 + len(tag_1): idx_2].strip()
    my_str = my_str.replace("\n", "")
    my_str = my_str.replace("window.isSpider = null;", "")
    my_str = my_str.replace("window.updateTime = null;", "")
    my_str = my_str.replace(" ", "")
    # endswith() is safe on an empty string, unlike my_str[len-1].
    if my_str.endswith(';'):
        my_str = my_str[:-1]
    j = json.loads(my_str)
    # .get avoids a KeyError on an unexpected payload shape.
    return j.get("result", '')


def get_info_list(pid, types):
    """Fetch a (possibly paginated) info list from an aiqicha ajax endpoint.

    Args:
        pid: company pid.
        types: api endpoint path, e.g. "detail/icpinfoAjax".

    Returns:
        A list of result entries; empty when the request reports failure.
    """
    logger.info("查询API {} ".format(types))
    url_prefix = 'https://www.baidu.com/'
    base_url = "https://aiqicha.baidu.com/{}?size=100&pid={}".format(types, pid)
    response = json.loads(get_req(base_url, url_prefix, True, True))
    collected = []
    if response['status'] != 0:
        return collected
    data = response['data']
    # The relational-map endpoint nests its payload one level deeper.
    if types == "relations/relationalMapAjax":
        data = data['investRecordData']
    page_count = data['pageCount']
    if page_count <= 1:
        return data['list']
    bar = tqdm(total=page_count, desc="【INFO_LIST】",
               bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))
    for page in range(1, page_count + 1):
        bar.update(1)
        page_url = "{}&p={}&page={}".format(base_url, page, page)
        page_data = json.loads(get_req(page_url, url_prefix, True, True))['data']
        collected.extend(page_data['list'])
    bar.close()
    return collected


class Scan(object):
    """Low-level aiqicha query helpers bound to one session cookie."""

    def __init__(self, cookie=''):
        # Cookie copied from a logged-in aiqicha browser session;
        # may be empty for anonymous access.
        self.cookie = cookie
        self.version = "v1.0.0"
        self.subdomains = []    # domains accumulated by icp_query()
        self.company_name = ''

    def get_item_name(self, item):
        """Return (pid, entName) with HTML tags stripped from the name."""
        pattern = re.compile(r'<[^>]+>', re.S)
        clean_name = pattern.sub('', item['entName'])
        return item['pid'], clean_name

    def access_pid(self, pid, url_prefix) -> str:
        """Fetch and parse the company detail page for *pid*."""
        url = "https://aiqicha.baidu.com/company_detail_" + pid
        content = get_req(url, url_prefix, True)
        return parse_detail(content)

    def access_des(self, pid, url_prefix, t=0):
        """Fetch the navigation list (tab metadata) for *pid*.

        *t* is unused but kept for interface compatibility.
        Returns {} when the request yields nothing.
        """
        url = "https://aiqicha.baidu.com/compdata/navigationListAjax?pid=" + pid
        res = get_req(url, url_prefix, True, is_json=True)
        if res:
            return json.loads(res)['data']
        return {}

    def get_basic_info(self, pid):
        """Return basic company info (email, addr, website, legalPerson,
        entName, openStatus, telephone); 'newTabs' carries the extra
        sections (IP assets etc.) available for this company."""
        item_detail = self.access_pid(pid, "")
        item_detail['newTabs'] = self.access_des(pid, "")
        return item_detail

    def icp_query(self, pid):
        """Collect ICP registration records; also feeds self.subdomains."""
        print('- ICP备案数据提取')
        icp_info = get_info_list(pid, 'detail/icpinfoAjax')
        icp_results = []
        for icp_item in icp_info:
            # homeSite may be an empty list; the old code indexed [0]
            # unconditionally and raised IndexError on such records.
            home_site = icp_item['homeSite'][0] if icp_item['homeSite'] else ''
            for domain_item in icp_item['domain']:
                icp_results.append({
                    "siteName": icp_item['siteName'],
                    "homeSite": home_site,
                    "icpNo": icp_item['icpNo'],
                    "domain": domain_item,
                })
                self.subdomains.append(domain_item)
        return icp_results

    def app_query(self, pid):
        """Query mobile-app records for the company."""
        print("-APP信息-")
        return get_info_list(pid, "c/appinfoAjax")

    def wechat_query(self, pid):
        """Query WeChat official-account records for the company."""
        print("-微信公众号信息-")
        # Removed an unused local accumulator that was never returned.
        return get_info_list(pid, "c/wechatoaAjax")

    def copyright_info(self, pid):
        """Query software-copyright records for the company."""
        print("-软件著作-")
        return get_info_list(pid, "detail/copyrightAjax")

    def micro_blog_query(self, pid):
        """Query Weibo account records for the company."""
        print("-微博信息-")
        return get_info_list(pid, "c/microblogAjax")

    def supplier_query(self, pid):
        """Query supplier records for the company."""
        print("-供应商信息-")
        return get_info_list(pid, "c/supplierAjax")

    def check_name(self, name) -> tuple:
        """Resolve a keyword to (pid, entName); () when nothing matched."""
        logger.info("【开始查询关键词】 {}".format(name))
        item = get_pid_by_keyword(name)
        if item:
            # Use get_item_name so the returned name is already free of
            # the <em> highlight tags injected by the search page
            # (previously the raw tagged name was returned).
            res = self.get_item_name(item)
            print("【根据关键词查询到公司】 " + res[1])
            return res
        logger.error("【未查询到关键词】 {}".format(name))
        return ()


class AiqichaScan:
    """High-level orchestration: resolve a keyword to a company, gather
    every info section and export the results."""

    def __init__(self, cookie, keyword):
        self.cookie = cookie
        self.Scan = Scan(cookie)
        self.keyword = keyword
        self.pid = ''
        self.basic_info = ''
        self.details = ''
        self.company_name = ''
        self.domains = []
        self.save_path = ''

    @property
    def columns_dicts(self):
        """Mapping from API field names to Chinese column headers used
        by the Excel export."""
        return {
            'principalName': '账号主体',
            'wechatId': '公众号ID',
            'wechatName': '账号名称',
            'wechatIntruduction': '公众号介绍',
            'wechatLogo': '账号Logo',
            'qrcode': '二维码',
            'supplier': '供应商名称',
            'name': '名称',
            'classify': '类别',
            'logoWord': 'Logo名称',
            'logoBrief': 'Logo简介',
            'entName': '所属企业',
            'domain': '域名',
            # Fixed key: icp_query() emits 'siteName'; the old 'siteNmae'
            # typo meant the ICP sheet never matched this column label.
            'siteName': '站点名称',
            'homeSite': '首页',
            'icpNo': '备案号',
        }

    def get_basic_info(self):
        """Resolve the keyword to a pid, then fetch and export basic info."""
        res = self.Scan.check_name(self.keyword)
        if not res:
            # Keyword matched nothing; previously unpacking the empty
            # tuple raised ValueError. Leave pid/company_name empty so
            # get_advance_info() can detect the failure.
            return
        self.pid, self.company_name = res
        # Basic info includes email, addr, website, legalPerson,
        # entName, openStatus, telephone.
        self.basic_info = self.Scan.get_basic_info(self.pid)
        # Strip the <em> highlight tags the search page injects.
        self.company_name = self.company_name.replace('<em>', '')
        self.company_name = self.company_name.replace('</em>', '')
        export_to_json(
            self.basic_info, json_name=f'{self.company_name}_basic_detail')

    def get_advance_info(self):
        """Query every extra info section for the resolved company."""
        if not self.company_name:
            print('信息搜集失败，关键词未匹配成功')
            # Previously execution fell through and every query below
            # ran against an empty pid; bail out instead.
            return
        self.app_result = self.Scan.app_query(self.pid)
        self.icp_result = self.Scan.icp_query(self.pid)
        self.domains = self.Scan.subdomains
        self.wechat_result = self.Scan.wechat_query(self.pid)
        self.copyright_result = self.Scan.copyright_info(self.pid)
        self.micro_blog = self.Scan.micro_blog_query(self.pid)
        self.supplier_result = self.Scan.supplier_query(self.pid)

    def exports(self):
        """Write every collected section to the Excel workbook."""
        self.save_path = f'res/{datetime.date.today()}-{self.company_name}.xlsx'
        sheets = [
            (self.icp_result, 'ICP备案信息'),
            (self.app_result, 'APP信息'),
            (self.wechat_result, '微信公众号信息'),
            (self.copyright_result, '软件著作权信息'),
            (self.micro_blog, '微博信息'),
            (self.supplier_result, '供应链信息'),
        ]
        for result, sheet_name in sheets:
            export_to_excel(result, self.company_name,
                            sheet_name=sheet_name, columns_dict=self.columns_dicts)