import re
import time

from lxml import etree
from core.http_client import HttpClient
from core.db_utils import MysqlManager
from multiprocessing.dummy import Pool
from core.logger import root_logger
import random

# Shared singletons reused by every crawler function below. The thread-pool
# workers (multiprocessing.dummy.Pool) all hit these concurrently, so both
# are assumed to be thread-safe — TODO confirm in core.db_utils / core.http_client.
db_manager = MysqlManager()
http_client = HttpClient()


def save_pic(url, name):
    """Download the image at *url* and write it to E:/imgs_1/{name}.gif.

    When *name* is empty a timestamp+random fallback name is generated.
    Best-effort: any failure is logged and swallowed so the caller's crawl
    loop keeps running (the original used a bare ``except: pass`` that hid
    every error).

    :param url: absolute image URL.
    :param name: base filename (typically the CAS number); may be ''.
    """
    try:
        if not name:
            name = str(int(time.time())) + str(random.randint(1, 99999))
        resp = http_client.get(url)
        try:
            with open(f'E:/imgs_1/{name}.gif', 'wb') as f:
                f.write(resp.content)
        finally:
            # Close the response even if the disk write raises (the original
            # leaked it on write failure).
            resp.close()
    except Exception as e:
        # Keep best-effort semantics, but record the failure so broken
        # downloads are diagnosable instead of silently ignored.
        root_logger.error(f'图片保存失败：{e}，链接:{url}')


def get_info(_id):
    """Fetch one chemblink product detail page and persist the parsed fields
    into table ``chemblink_1`` (or log the URL+error into ``chemblink_exc_1``
    on any failure).

    :param _id: bare product identifier; the page URL is built as
                https://www.chemblink.com/products/{_id}C.htm
    """
    prod_url = f'https://www.chemblink.com/products/{_id}C.htm'
    print(prod_url)
    try:
        prod_name = ''
        resp = http_client.get(prod_url)
        response = resp.content.decode('utf-8', errors="ignore")
        html = etree.HTML(response)
        resp.close()
        trs = html.xpath('//table[@class="right"]/tr')
        en_name = ''
        img_url = ''
        bie_ming_name = ''
        fenzishi = ''
        fenziliang = ''
        cas_login_num = ''
        einecs_login_num = ''
        SMILES = ''
        InChI = ''
        InChIKey = ''
        for tr in trs:
            # Row label lives in th[1] on some rows and td[1] on others.
            key_name = ''.join(tr.xpath('th[1]/text()') or tr.xpath('td[1]/text()'))
            if key_name == "产品名称":
                prod_name = ''.join(tr.xpath('th[2]/text()') or tr.xpath('td[2]/text()'))
            elif key_name == "英文名":
                en_name = ''.join(tr.xpath('td[2]/text()'))
            elif key_name == "分子结构":
                img_url = ''.join(tr.xpath('td[2]/img/@src'))
                if img_url != '':
                    img_url = 'https://www.chemblink.com' + img_url
            elif key_name == "别名":
                bie_ming_name = ''.join(tr.xpath('td[2]/text()'))
            elif key_name == "分子式":
                fenzishi = ''.join(tr.xpath('td[2]//text()'))
            elif key_name == "分子量":
                fenziliang = ''.join(tr.xpath('td[2]/text()'))
            elif key_name == "CAS 登录号":
                cas_login_num = ''.join(tr.xpath('td[2]/text()'))
            elif 'EINECS' in key_name:
                einecs_login_num = ''.join(tr.xpath('td[2]/text()'))
            elif 'SMILES' in key_name:
                SMILES = ''.join(tr.xpath('td[2]/text()'))
            # BUG FIX: the InChIKey test must come before the InChI test —
            # 'InChI' is a substring of 'InChIKey', so with the original
            # ordering the InChIKey branch was unreachable and InChIKey was
            # never captured.
            elif 'InChIKey' in key_name:
                InChIKey = ''.join(tr.xpath('td[2]/text()'))
            elif 'InChI' in key_name:
                InChI = ''.join(tr.xpath('td[2]/text()'))
        # BUG FIX: download the structure image once after parsing. The
        # original called save_pic inside the row loop, re-downloading the
        # same image on every remaining iteration after the image row, and
        # possibly before cas_login_num had been parsed.
        if img_url != '' and img_url.endswith('.gif'):
            save_pic(url=img_url, name=cas_login_num)
        del trs, html

        def _q(v):
            # Minimal escaping so backslashes/quotes in scraped text do not
            # break the statement. NOTE(review): string-built SQL remains
            # injection-prone — switch to a parameterized execute if
            # db_manager supports placeholders.
            return str(v).replace('\\', '\\\\').replace('"', '\\"')

        sql = 'insert into chemblink_1 (prod_name, en_name, bie_ming_name, fenzishi, fenziliang, cas_login_num, einecs_login_num, SMILES, InChI, InChIKey,img_url,prod_url) values ("{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}","{}")'.format(
            _q(prod_name), _q(en_name), _q(bie_ming_name), _q(fenzishi), _q(fenziliang), _q(cas_login_num),
            _q(einecs_login_num), _q(SMILES), _q(InChI), _q(InChIKey), _q(img_url), _q(prod_url))

        db_manager.execute_sql(sql)
    except Exception as e:
        root_logger.error(f'爬取异常：{e}，链接:{prod_url}')
        sql = 'insert into chemblink_exc_1 (url, e) values("{}","{}")'.format(prod_url, _q(str(e)) if 'e' else str(e))
        db_manager.execute_sql(sql)


def get_prod():
    """Walk glossary listing pages 2636..3175, extract product links and crawl
    each product with a 10-worker thread pool."""
    # Hoisted out of the loop and made a raw string: the original compiled
    # the pattern on every page and used non-raw '\(' / '\)', which Python
    # flags as invalid escape sequences (the matched bytes are identical).
    reg_pro = re.compile(r'<a href="(.*?)" class="blue" onclick="blur\(\)" target="_blank">(.*?)</a>')
    for page in range(2636, 3176):
        print(f'开始爬取第{page}页！')
        url = f"https://www.chemblink.com/glossary/glossary{page}C.htm"
        resp = http_client.get(url)
        response = resp.content.decode('utf-8', errors="ignore")
        # BUG FIX: the response was never closed here (get_prod_ex closes its).
        resp.close()
        results = reg_pro.findall(response)
        if results:
            # BUG FIX: get_info() expects a bare product id (it builds
            # /products/{_id}C.htm itself), but findall yields
            # (href, link_text) tuples; passing the tuples through produced
            # broken URLs. Strip the path prefix/suffix from the href to
            # recover the id — assumes hrefs look like /products/<id>C.htm,
            # TODO confirm against the live markup.
            ids = [re.sub(r'^/products/(.*)C\.htm$', r'\1', href) for href, _text in results]
            pool = Pool(10)
            pool.map(get_info, ids)
            pool.close()
            pool.join()
        print(f'第{page}页爬取完成！')


def get_prod_ex():
    """Crawl the per-letter name index pages declared in config ``dicts``
    (letter -> page count) and feed every extracted product id to get_info
    via a 10-worker thread pool."""
    from apps.chemblink.config import dicts
    # Hoisted: the original recompiled this pattern on every page iteration.
    reg_pro = re.compile(r"goPage\('(.*?)'\)")
    for k, v in dicts.items():
        for page in range(1, v + 1):
            print(f"正在爬取{k}的页数：" + str(page))
            url = f'https://www.chemblink.com/name/{k}{page}C.htm'
            resp = http_client.get(url)
            response = resp.content.decode('utf-8', errors="ignore")
            resp.close()
            results = reg_pro.findall(response)
            # Release the page HTML before fanning out the (potentially long)
            # crawl of its products.
            del response
            pool = Pool(10)
            pool.map(get_info, results)
            pool.close()
            pool.join()
