import json
import random
import time

import pymysql
import requests
import re

from lxml import etree

# Example of the company-info API this site exposes (kept for reference):
# https://www.shixiseng.com/proxy-prefix/new-intern-api-host/api/interns/v3.0/company/info/wxz?build_time=1711183800142&uuid=com_leqqbu1xz6w
# Desktop-browser User-Agent so the site serves the normal (non-blocked) pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}

# Shared MySQL connection used by the whole crawl; closed at the end of __main__.
# NOTE(review): credentials are hardcoded — move to env vars/config for anything non-local.
connection = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', db='jobinformation',
                             charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)


def get_word():
    """Download the site's obfuscation webfont and dump it to XML.

    The listing pages render key characters with a custom webfont whose
    glyph names encode the real code points.  This fetches the current
    font referenced by the search page, saves it to utils/file.ttf, and
    converts it to utils/font.xml for get_dict() to parse.

    Raises:
        RuntimeError: if the 'rand' font token cannot be found in the page
            (i.e. the site markup changed).
        requests.HTTPError: if either HTTP request returns an error status.
    """
    response = requests.get(
        'https://www.shixiseng.com/interns?keyword=%E4%BA%92%E8%81%94%E7%BD%91IT&city=%E5%85%A8%E5%9B%BD&type=intern&from=menu',
        headers=headers, timeout=30)
    response.raise_for_status()

    match = re.search(r'rand=([0-9.]+)', response.text, re.S)
    if match is None:
        # Original code would crash with AttributeError here; fail loudly
        # with an explanation instead.
        raise RuntimeError('could not locate the font rand token in the search page')

    res1 = requests.get('https://www.shixiseng.com/interns/iconfonts/file?rand={}'.format(match.group(1)),
                        headers=headers, timeout=30)
    res1.raise_for_status()

    with open("utils/file.ttf", 'wb') as f:
        f.write(res1.content)

    # Convert the TTF to XML so the cmap (code -> glyph name) can be parsed as text.
    from fontTools.ttLib import TTFont
    font = TTFont("utils/file.ttf")
    font.saveXML("utils/font.xml")


def get_dict():
    """Parse utils/font.xml and build the glyph substitution table.

    Each cmap entry in the dumped font XML looks like
    ``<map code="0xe123" name="uni4F60"/>`` — the ``code`` is the private-use
    code point that appears in page source (after '&#' is rewritten to '0'),
    and the hex after ``uni`` is the real character.

    Returns:
        dict: maps hex code strings like '0xe123' to the real character.
    """
    with open('utils/font.xml', encoding='utf-8') as f:
        xml = f.read()

    # One pass, both attributes captured together — keeps code/name pairs
    # aligned by construction instead of relying on two parallel findall()s.
    pairs = re.findall(r'<map code="(0x.*?)" name="uni(.*?)"/>', xml)

    # chr(int(h, 16)) converts the glyph-name hex directly to its character.
    # The original '\\u00' + h padding was only correct for 2-digit names:
    # a 3-digit name produced a malformed \uXXXX escape (wrong character or
    # UnicodeDecodeError).  chr() handles any hex length correctly.
    return {code: chr(int(hexname, 16)) for code, hexname in pairs}


def get_info(inn):
    """Fetch one listing's detail page and extract description + address.

    Args:
        inn: the listing's uuid, interpolated into the detail-page URL.

    Returns:
        dict with keys 'positionDetail' (newlines stripped) and
        'positionAddress' ('/' separators stripped).
    """
    detail_url = 'https://www.shixiseng.com/intern/{}?pcm=pc_SearchList'.format(inn)
    page = requests.get(detail_url, headers=headers)
    tree = etree.HTML(page.text)

    # NOTE(review): absolute XPaths — they break silently (empty string)
    # whenever the site's layout changes.
    detail_nodes = tree.xpath(
        '/html/body/div[1]/div/div/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div/text()')
    address_nodes = tree.xpath(
        '/html/body/div[1]/div/div/div[2]/div[1]/div[2]/div[1]/div[3]/span[1]/text()')

    return {
        'positionDetail': ''.join(detail_nodes).replace('\n', ''),
        'positionAddress': ''.join(address_nodes).replace('/', ''),
    }


def spider_sxs(url):
    """Scrape one search-API page and insert each listing into MySQL.

    De-obfuscates the webfont glyphs using the module-level ``dict`` mapping
    (built by get_dict(); NOTE(review): that global shadows the builtin
    ``dict`` — kept for compatibility with the existing __main__), parses
    the JSON payload, enriches each listing via get_info(), and writes one
    row per listing into `shixiseng_table_1`.  Individual failures are
    reported and skipped so a long crawl keeps going.

    Args:
        url: full search-API URL for a single page of results.
    """
    print('url: ' + url)
    response = requests.get(url, headers=headers)
    # Glyphs appear as &#xefed; in the source — rewrite '&#' to '0' so each
    # becomes a '0xefed'-style token matching the keys of the glyph table.
    response_text = response.text.replace('&#', '0')

    for key in dict:
        response_text = response_text.replace(key, dict[key])  # token -> real char

    try:
        data = json.loads(response_text.encode('raw_unicode_escape').decode('unicode_escape'))
    except (UnicodeDecodeError, json.JSONDecodeError) as exc:
        # Best effort: a malformed page is skipped, not fatal to the crawl —
        # but unlike the original bare `except: pass`, say why.
        print('skip page, cannot decode payload: {}'.format(exc))
        return

    cursor = connection.cursor()
    try:
        for item in list(data['msg']['data']):
            # Site reports daily pay; estimate a monthly range (22 workdays), in 'k'.
            salary = str(item['minsalary'] * 22 / 1000) + 'k-' + str(item['maxsalary'] * 22 / 1000) + 'k'
            companyLabelList = '，'.join([str(tag) for tag in list(item['i_tags'])])
            dit = get_info(item['uuid'])
            row = (item['uuid'], item['cname'], dit['positionAddress'], salary,
                   item['degree'], '', item['industry'], dit['positionDetail'], item['name'],
                   item['city'], item['scale'], companyLabelList)
            print(*row)
            try:
                # Parameterized query: the original interpolated raw scraped
                # text straight into the SQL string, so any quote in a job
                # description broke the statement (and invited injection).
                cursor.execute(
                    "INSERT INTO `shixiseng_table_1` VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                    row)
            except pymysql.MySQLError as exc:
                # Keep the best-effort semantics but surface the failure.
                print('row insert failed for {}: {}'.format(item['uuid'], exc))
        connection.commit()
    except KeyError as exc:
        print('unexpected payload shape, missing key: {}'.format(exc))
    finally:
        cursor.close()


if __name__ == '__main__':
    # Refresh the anti-scraping font, then crawl all 242 result pages.
    get_word()
    # Glyph -> character table read by spider_sxs as a module global.
    # NOTE(review): the name shadows the builtin `dict`; kept as-is because
    # spider_sxs looks it up by this exact name.
    dict = get_dict()

    # Template for the search API; only the page number varies per request.
    search_url = ('https://www.shixiseng.com/app/interns/search/v2?build_time=1711118200169&page={}&'
                  'type=intern&keyword=%E5%90%8E%E7%AB%AF%E5%BC%80%E5%8F%91%E5%89%8D%E7%AB%AF%E5%BC%80%E5%8F%91%E6%95%B0%E6%8D%AE%E7%A7%BB%E5%8A%A8%E5%BC%80%E5%8F%91%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD%E6%B5%8B%E8%AF%95%E5%B5%8C%E5%85%A5%E5%BC%8F%E7%BD%91%E7%BB%9C%E9%80%9A%E4%BF%A1'
                  '&area=&months=&days=&degree=&official=&enterprise=&salary=-0&publishTime=&sortType=&city=%E5%85%A8%E5%9B%BD&internExtend=')

    for page_no in range(1, 243):
        spider_sxs(url=search_url.format(page_no))
        # Polite, jittered delay between requests.
        time.sleep(random.randint(1, 6))

    connection.close()
