# coding=utf-8
import json
import os
import sys
import time
import traceback
from pathlib import Path

import bs4
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By

from util import save_search_lists_of_companies_to_html, chrome_options, \
    save_file, app_log, read_company_profile, cfg, read_file, get_company_urls_from_file, \
    selenium_get_company_profile, data_save_format, setup, save_file_lines

# Shared module-level logger (configured in util.app_log).
log = app_log()

# Absolute path of the JSON-lines output file; assigned in company_main()
# before any parsing happens, read by bs4_company_profile().
save_company_json_file_path = None

# Resolve output directories relative to the current working directory.
current_path = Path().resolve()
data_abs_dir = os.path.join(current_path, cfg("data_dir"))
fix_dir = os.path.join(current_path, cfg("fix_dir"))
# Running tally of company pages that failed to parse (see bs4_company_profile).
error_count = 0


def get_lists_of_companies(key_word: str):
    """
    Search Alibaba's factory index for *key_word* and save the resulting
    company-list page HTML to a file.

    打开搜索页面 -> 关键词搜索公司列表 -> html列表页写入文件

    :param key_word: 搜索关键字
    :return: None
    """
    log.info(f"搜索关键词:{key_word}")

    browser = Chrome(options=chrome_options())
    try:
        # 打开搜索 — open the factory search landing page.
        url = "https://www.alibaba.com/factory/index.html"
        browser.get(url)
        browser.find_element(By.CSS_SELECTOR, ".search-bar-input").click()

        # 关键词进行搜索 — type the keyword and trigger the search.
        browser.find_element(By.CSS_SELECTOR, ".search-bar-input").send_keys(key_word)
        browser.find_element(By.CSS_SELECTOR, ".fy23-icbu-search-bar-inner-button > span").click()

        # Crude fixed wait for the results page to render.
        # NOTE(review): an explicit WebDriverWait would be more reliable.
        time.sleep(3)

        # 获取公司列表 — grab the rendered results page.
        html = browser.page_source
    finally:
        # Always release the browser, even when a selector lookup raises,
        # so a failed run does not leak a Chrome process.
        browser.quit()

    # 保存
    save_search_lists_of_companies_to_html(html)
    return


def bs4_get_company_urls_from_selenium():
    """
    Parse the saved company-list HTML with bs4, extract up to
    ``cfg('company_count')`` company URLs and write them (one per line)
    to the configured URL-list file.

    使用bs4从html列表页提取URL列表写入文件

    :return: None
    """
    # 从列表页中提取多少条URL
    company_count_ = int(cfg('company_count'))
    log.info(f"获取{company_count_}条公司url")
    html_content = read_file(cfg('search_lists_of_companies_html_abs_path'))

    bs = bs4.BeautifulSoup(html_content, 'html.parser')
    company_list = bs.select(".detail-info")[:company_count_]

    company_url_list = []
    for company in company_list:
        anchors = company.select("a")
        if not anchors:
            # Defensive: skip list entries without a link instead of
            # crashing the whole extraction with an IndexError.
            continue
        # hrefs on the page are protocol-relative ("//..."), so prepend "https:".
        company_url = f"https:{anchors[0].get('href')}"
        company_url_list.append(company_url)

    log.info(f"url列表为:{company_url_list}")
    urls = [url + "\n" for url in company_url_list]

    # Overwrite ('w') the URL-list file with the fresh batch.
    save_file_lines(urls, cfg('company_url_list_abs_path'), 'w')
    return


def _extract_certification_images(bs):
    """
    Collect certification image URLs from the company page carousel.

    企业证书没有文字,需要特殊处理:从轮播图的 <img> 标签提取 src。

    :param bs: BeautifulSoup of the full company profile page
    :return: list of image ``src`` URLs (possibly empty)
    """
    certifications_img_list = []
    html_obj = bs.find_all('div', class_="next-slick-slide next-slick-active photo")
    for certification_obj in html_obj:
        img = certification_obj.find("img")
        certifications_img_list.append(img.attrs.get("src"))
    return certifications_img_list


def bs4_company_profile():
    """
    Parse the previously downloaded company profile page, build a dict of
    company details, and append it as one JSON line to
    ``save_company_json_file_path``.

    数据结构 有1 - 2

    Overview
        Company registration date : 2009-05-27
        Floor space(㎡): 1950
        Accepted languages: English
        Years exporting : 13
        Years in industry: 13

    Production capabilities
        Production lines3
        Total annual output (units)1470000
        Production machines30

    Parsing errors are swallowed deliberately (page layouts vary); each
    failure is logged and counted in the module-level ``error_count``.

    :return: None
    """
    try:
        bs = bs4.BeautifulSoup(read_company_profile(), 'html.parser')
        # Basic metadata comes from the OpenGraph <meta> tags.
        company_url = bs.find("meta", attrs={"property": "og:url"}).attrs.get("content")
        company_logo = bs.find("meta", attrs={"property": "og:image"}).attrs.get("content")
        company_name = bs.find("meta", attrs={"property": "og:title"}).attrs.get("content")

        company_profile_bs = bs.find("dl")
        company_profiles = {
            'company_name': company_name,
            'company_url': company_url,
            'company_logo': company_logo,
        }
        # <dt> holds the section titles; the parallel .profile-list divs hold
        # each section's detail rows (same index pairs them together).
        dts = company_profile_bs.find_all("dt")
        profile_list = company_profile_bs.find_all("div", class_="profile-list")

        for i, dt in enumerate(dts):
            detail_dict = {}
            # 左侧分类 Overview | Production capabilities
            left_title = dt.text

            # 企业证书没有文字 特殊处理
            if left_title == 'Certifications':
                certifications_img_list = _extract_certification_images(bs)
                if len(certifications_img_list) != 0:
                    detail_dict['certifications_img_list'] = certifications_img_list

            # 详细信息: label text (first child node) -> <strong> value.
            details = profile_list[i].select(".profile-detail")
            for detail in details:
                detail_dict[detail.next.text] = detail.find("strong").text

            company_profiles[left_title] = detail_dict

        json_obj = json.dumps(company_profiles)

        log.info(f"公司信息:{json_obj}")
        global save_company_json_file_path
        log.info(save_company_json_file_path)
        save_file(json_obj + "\n", save_company_json_file_path, 'a+')
    except Exception:
        # Best-effort scraping: log and count the failure, keep the batch going.
        log.info("报错啦!!!!!")
        log.info(traceback.format_exc(limit=1))
        global error_count
        error_count += 1
    return


def company_main():
    """
    Entry point: initialize config, compute the JSON output path, optionally
    run the search + URL-extraction steps, then fetch and parse each
    company profile page.
    """
    setup()

    keyword = cfg('keyword')

    global save_company_json_file_path, data_abs_dir
    s_path = data_save_format(str(keyword), "company")

    # 最终产品信息->文件保存路径: <data_dir>/<keyword_with_underscores>/<s_path>
    data_abs_dir = os.path.join(data_abs_dir, str(keyword).replace(" ", "_"))
    # makedirs(exist_ok=True) avoids the check-then-create race of
    # os.path.exists + os.mkdir and also creates missing parent directories.
    os.makedirs(data_abs_dir, exist_ok=True)
    save_company_json_file_path = os.path.join(data_abs_dir, s_path)

    if cfg("company_profile_auto") == "true":
        log.info("全自动收集工厂相关信息.")
        # 1. 搜索获取公司列表页
        get_lists_of_companies(key_word=keyword)

        # 2. 从公司列表页提取url (解析页面获取url写入文件)
        bs4_get_company_urls_from_selenium()

    # 3. 逐个获取并解析公司页面
    url_list = get_company_urls_from_file()
    for url in url_list:
        selenium_get_company_profile(url)
        bs4_company_profile()


if __name__ == '__main__':
    # Top-level guard: log any failure instead of dying with a raw traceback.
    try:
        company_main()
    except Exception:
        log.info("company---报错啦!!!!!")
        log.info(traceback.format_exc(limit=1))
