import json
import re
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED

from log.log_tools import logger
from spider import base_craw
from spider.parase import prase_url
from tools import UA_pools
from spider import parase,download

# One User-Agent provider per process; a single UA string is sampled once at
# import time (below) and reused for every request in this run.
ua = UA_pools.Get_ua()
# Default request headers shared by all crawler calls.
# NOTE(review): Referer is pinned to the site's advanced-search page,
# presumably to mimic a browser session — confirm the site requires it.
headers = {
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': ua.get_ua(),
    'Referer': 'https://linnean-online.org/cgi/search/advanced',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
}
# Root of every collection page; category and A-Z index suffixes get appended.
base_url = 'https://linnean-online.org/view/collection/'


# Alphabetical page suffixes 'A'..'Z' (65..90 are the ASCII codes).
page = [chr(letter) for letter in range(65, 91)]
# XPath selecting item links inside the 3-column index table.
format_char = '//table[@class="ep_view_cols ep_view_cols_3"]//tr//td//li/a/@href'

def get_char(iterm):
    """Crawl one top-level collection category.

    For each alphabetical index page (A-Z) of *iterm*: fetch the page,
    extract the per-item links, and descend into each item's image list.

    :param iterm: category path fragment, e.g. ``'fish/'`` (trailing slash
        expected — URLs are built by plain concatenation).
    """
    for char in page:
        # e.g. .../view/collection/fish/index.A.html
        main_url = base_url + iterm + 'index.' + char + '.html'
        res_html = base_craw.get_index(main_url, headers, '')
        if res_html.status_code != 200:
            # Use the project logger instead of bare print(); one failed
            # index page should not abort the whole category.
            logger.error('index page %s returned %s', main_url, res_html.status_code)
            continue
        child_url = parase.prase_url(res_html.text, format_char)
        for url in child_url:
            # Extracted hrefs are relative to the category root.
            primary_url = base_url + iterm + url
            # Enter the item's image list page.
            get_url_pr(primary_url)



def get_url_pr(url, max_workers=None):
    """Fetch an item page and download all of its images concurrently.

    :param url: item detail page URL.
    :param max_workers: thread-pool size. Defaults to the script-level
        ``max_th_num`` when it exists (it is assigned only under
        ``__main__``), otherwise 30 — so importing this module and calling
        the function directly no longer raises ``NameError``.
    """
    if max_workers is None:
        # BUG FIX: the original read the bare global max_th_num, which is
        # only defined in the __main__ block and crashed on direct use.
        max_workers = globals().get('max_th_num', 30)

    res_html = base_craw.get_index(url, headers, '')

    soup = BeautifulSoup(res_html.text, 'lxml')
    # Each 'ep_document_link' anchor wraps one downloadable image.
    img_class_list = soup.find_all(attrs={'class': 'ep_document_link'})
    # Fan the downloads out over a pool and block until all finish.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        dow_tasks = [
            executor.submit(download_img, url_iterm_class)
            for url_iterm_class in img_class_list]
        wait(dow_tasks, return_when=ALL_COMPLETED)


def download_img(img_class):
    """Resolve one image anchor to its info page and hi-res image URL.

    :param img_class: bs4 Tag for an ``<a class="ep_document_link">`` whose
        ``href`` is the image's info page and whose first child ``<img>``
        ``src`` is a thumbnail URL containing a ``/!W,H`` resolution segment.
    """
    # Target resolution to request from the image server.
    dpi1 = 1920
    dpi2 = 1080
    # Raw string: '\d' inside a plain literal is an invalid escape sequence.
    pattern = r'/!(\d+),(\d+)'
    url_img_info = img_class.attrs['href']
    url_img = img_class.contents[0].attrs['src']
    # Rewrite the '/!W,H' segment to the hi-res dimensions; a plain
    # replacement string suffices — the original lambda ignored its match.
    hd_img_url = re.sub(pattern, '/!' + str(dpi1) + ',' + str(dpi2), url_img)
    # Fetch and emit the image's metadata record.
    get_img_info(url_img_info, hd_img_url)


def get_img_info(url, src_url):
    """Fetch an image's info page and assemble its metadata record.

    Builds the fixed-schema ``info`` dict, scrapes the title and numeric
    external id from the info page, and prints the record as JSON.
    NOTE(review): the image/text upload code that used to live here was
    already disabled (commented out) — this function currently only emits
    the metadata; re-enable persistence via ``download.upload_content``
    when needed.

    :param url: item info page URL (contains title and metadata table).
    :param src_url: hi-res image URL, recorded as referral/sha1 source.
    """
    info = {
        "uid": "",
        "uid_name": "",
        "source": "https://linnean-online.org/",
        "external id": "",
        "license": "",
        "license_type": "",
        "author": "",
        "author_link": "",
        "referral": "",
        "sha1_string": "",
        "batch_id": "DataAggregate_752",
        "download url": "",
    }
    res_html = base_craw.get_index(url, headers, '')
    print(url)
    if res_html.status_code == 200:
        try:
            # Title lives in a nested span under the summary heading.
            format1 = "//div[@class='ep_summary_content_right']/h3/em/span[@id='title']/span/text()"
            uid_name = parase.prase_url(res_html.text, format1)[0]
            # First run of digits in the URL is the record's external id.
            external_id = re.findall(r"(\d+)", url)[0]
            sha1 = parase.sha1(src_url)

            info["uid_name"] = uid_name
            info["external id"] = external_id
            info["referral"] = src_url
            info["sha1_string"] = sha1
            info["others"] = parase.meta_data(res_html.text)
        except Exception as e:
            # Best effort: log and fall through so a partial record is
            # still emitted rather than aborting the whole item.
            logger.error(url)
            logger.error(e)

        img_info = json.dumps(info)
        print(img_info)






if __name__ == '__main__':
    # Pool size; also read as a module global by get_url_pr.
    max_th_num = 30
    # Top-level collection categories ('=5F' is the site's escaping of '_').
    classification = ['linnean=5Fherbarium/', 'fish/', 'shells/', 'insects/', 'smith=5Fherbarium/']
    # classification = ['fish/']
    # Crawl every category concurrently and block until all are done.
    with ThreadPoolExecutor(max_workers=max_th_num) as pool:
        pending = [pool.submit(get_char, category) for category in classification]
        wait(pending, return_when=ALL_COMPLETED)
