# coding: utf8

import os
import logging
import datetime
import re
import traceback
import mysql.connector

import requests_html
from multiprocessing.pool import ThreadPool


# Log INFO and above for the whole script.
logging.basicConfig(level=logging.INFO)

# MySQL connection parameters.
# NOTE(review): credentials are hard-coded in source; consider moving them
# to environment variables or a config file.
database_settings = {
    "host": "127.0.0.1",
    "port": 3306,
    "user": 'gt',
    "password": "root",
    "database": "icexpert"
}

# HTML session used for all network requests.
session = requests_html.HTMLSession()
# Thread pool that fetches teacher detail pages concurrently.
pool = ThreadPool(os.cpu_count() * 2)

# Database connection and shared cursor.
# NOTE(review): this single cursor/connection is also used from pool worker
# threads (teacher_detail); mysql-connector connections are generally not
# thread-safe — confirm, or give each worker its own connection.
db = mysql.connector.connect(**database_settings)
cursor = db.cursor()


def teacher_list():
    """Scrape the PKU School of Software faculty list page.

    Inserts one ``tb_expert`` row per teacher (name, rank, org) and schedules
    an asynchronous fetch of each teacher's detail page on the module-level
    thread pool.

    Raises:
        requests.HTTPError: if the list page does not return HTTP 200.
        Exception: any scraping/DB error is printed and re-raised.
    """
    logging.info("teacher list: pku")
    try:
        r = session.get("http://www.ss.pku.edu.cn/index.php/teacherteam/teacherlist")
        if r.status_code == 200:
            # Each rank heading (<h4>) is followed by one <p> that holds the
            # teacher name/link nodes for that rank.
            # Academicians
            academician = r.html.xpath('//div[@class="ss-general-info"]//h4[text()="院士："][1]/following-sibling::p[@class="p_font_mid clearfix"][1]/*[@class="block-text"]')
            # Professors
            professor = r.html.xpath('//div[@class="ss-general-info"]//h4[text()="教授："][1]/following-sibling::p[@class="p_font_mid clearfix"][1]/*[@class="block-text"]')
            # Associate professors
            associate = r.html.xpath('//div[@class="ss-general-info"]//h4[text()="副教授："][1]/following-sibling::p[@class="p_font_mid clearfix"][1]/*[@class="block-text"]')
            details_urls = []
            for position, nodes in zip(["院士", "教授", "副教授"], [academician, professor, associate]):
                for node in nodes:
                    # Hoist the attribute lookup; it was fetched three times before.
                    href = node.attrs.get("href")
                    logging.info("name: %s, position_zh: %s, href:[%s] %s", node.text, position, node.tag, href)
                    sql = "insert into `tb_expert`(`name_zh`, `position_zh`, `org`, `source`) VALUES(%s, %s, %s, %s)"
                    cursor.execute(sql, (node.text, position, "北京大学", "北京大学"))
                    db.commit()

                    # Row id of the insert, passed on so the detail fetch can
                    # update the same row.
                    insert_id = cursor.lastrowid
                    if href is not None:
                        details_urls.append(("http://www.ss.pku.edu.cn" + href, node.text, insert_id))
            logging.info("detail urls: %s", details_urls)
            # Fetch each teacher's detail page concurrently.
            for (url, name, insert_id) in details_urls:
                # Fixed typo in log message ("aysnc" -> "async").
                logging.info("apply async: %s", name)
                pool.apply_async(teacher_detail, (url, name, insert_id))
        else:
            logging.warning("response status : %d", r.status_code)
            r.raise_for_status()
    except Exception:
        traceback.print_exc()
        # Bare raise preserves the original traceback.
        raise


def __ascii_to_char(s: str) -> str:
    """Decode HTML numeric character references (``&#NNN;``) in *s*.

    Returns *s* with every ``&#NNN;`` entity replaced by ``chr(NNN)`` and all
    whitespace removed (the fragments arrive padded with spaces).

    Fixes over the previous token-based implementation:
    - fragments mixing letters and digits (e.g. ``"xm2"``) were silently
      dropped because they are neither ``isalpha()`` nor ``isdigit()``;
    - a bare numeric fragment without entity markers was ambiguously
      reinterpreted as a codepoint; now only explicit ``&#NNN;`` entities
      are decoded.
    """
    decoded = re.sub(r"&#(\d+);", lambda m: chr(int(m.group(1))), s)
    # The fragments are space-padded; the original dropped that padding too.
    return re.sub(r"\s+", "", decoded)


def __extract_email(s):
    """Decode the JS-obfuscated email address on a teacher detail page.

    The page hides the address in a script such as::

        var addy51857 = 'n&#105;xm' + '&#64;';
        addy51857 = addy51857 + 'ss' + '&#46;' + ... + 'cn';

    The first statement builds the local part, the second appends the domain;
    each is a ``+``-concatenation of quoted fragments whose characters are
    partly HTML numeric entities (``&#NNN;``).

    Raises AttributeError if the snippet does not match (callers catch it).
    Uses raw regex strings (the old patterns relied on non-raw ``"\\S"``
    escapes, and ``[\\S\\d]`` is just ``\\S``).
    """
    def _decode(fragment_expr):
        # Join the quoted fragments, then decode the &#NNN; entities.
        joined = "".join(p.strip().strip("'") for p in fragment_expr.rstrip(";").split("+"))
        return re.sub(r"&#(\d+);", lambda m: chr(int(m.group(1))), joined)

    # Local part: right-hand side of the initial "var addyXXX = ...;".
    local = re.search(r"var addy\S+ = ([\s\S]*?';)", s).group(1)
    # Domain: the fragments appended by "addyXXX = addyXXX + ...;".
    domain = re.search(r"addy\S+ = addy\S+ \+ ([\s\S]*?';)", s).group(1)
    return _decode(local) + _decode(domain)


def teacher_detail(url, name, row_id):
    """Scrape one teacher's detail page and update row *row_id* of tb_expert.

    Runs on a pool worker thread (scheduled by teacher_list).

    Args:
        url: absolute URL of the teacher's detail page.
        name: fallback name if the page carries none.
        row_id: primary key of the row inserted by teacher_list.
    """
    logging.info("teacher detail: %s", name)
    try:
        r = session.get(url)
        # The email on the page is produced by JS; instead of rendering with
        # chromium (r.html.render()) the obfuscated snippet is decoded below.

        avatar = r.html.xpath('//*[@id="content"]//div[@class="span3 a_photo"][1]//img[@class="img-polaroid"][1]/@src')
        # Store a full URL (with scheme), consistent with the detail URLs
        # built in teacher_list; the old code stored a scheme-less host.
        avatar = "http://www.ss.pku.edu.cn" + avatar[0] if avatar else ''
        name_zh = r.html.xpath('//*[@class="teacher-name"][1]/text()')
        name_zh = name_zh[0] if name_zh else name
        lis = r.html.xpath('//*[@id="content"]//div[@class="span3 a_photo"][1]//ul[1]//li')
        position_zh = lis[0].text
        department = lis[1].text
        tel = ''
        email = ''
        for li in lis[2:]:
            text = li.text
            # BUG FIX: str.strip() removes a *character set*, not a prefix —
            # the old .strip("联系电话：").strip("Tel: ") could eat leading or
            # trailing characters of the number itself. Slice the prefix off.
            for prefix in ("联系电话：", "Tel: "):
                if text.startswith(prefix):
                    tel = text[len(prefix):]
            if li.xpath('*[@class="p_font_small"]'):
                # Email is obfuscated in an inline JS snippet; decode it.
                email = __extract_email(text)
        logging.info("info: %s, position: %s, department: %s, tel: %s, email: %s, avatar: %s", name_zh, position_zh, department, tel, email, avatar)
        # BUG FIX: "UPDATE INTO" is invalid MySQL syntax, so every update
        # raised a ProgrammingError (silently lost inside apply_async).
        sql = "UPDATE `tb_expert` SET `name_zh`=%s, `position_zh`=%s, `department`=%s, `tel`=%s, `email`=%s, `avatar`=%s WHERE `id`=%s"
        cursor.execute(sql, (name_zh, position_zh, department, tel, email, avatar, row_id))
        db.commit()
    except Exception:
        traceback.print_exc()
        # Bare raise preserves the original traceback.
        raise


# http://www.ss.pku.edu.cn/index.php/teacherteam/teacherlist
# Entry point; scrapes http://www.ss.pku.edu.cn/index.php/teacherteam/teacherlist
def main():
    """Run the scrape, then release every module-level resource."""
    teacher_list()

    # Stop accepting new tasks and wait for the detail-page fetches queued
    # by teacher_list() to drain.
    pool.close()
    pool.join()

    # Tear down the HTTP session and the database handles.
    session.close()
    cursor.close()
    db.close()


if __name__ == "__main__":
    # Run the scraper only when executed directly, not on import.
    main()

