# -*- encoding: utf-8 -*-
"""
@File    :   shixiseng.py
@Time    :   2020/07/13 16:17:13
@Author  :   Song Zewen 
@Version :   1.0
@Contact :   stg1205@163.com
@License :   (C)Copyright 2020-2021, Liugroup-NLPR-CASIA
@Desc    :   实习僧网站爬虫，包括反字体反爬，下载信息 (shixiseng.com crawler: defeats the site's font-based anti-scraping obfuscation and downloads internship listings)
"""


import base64
import math
import re
import time

import requests
import xlwt
from bs4 import BeautifulSoup
from fontTools.ttLib import TTFont

# FIX: the original "import .crawler_util as cu" is a SyntaxError (relative
# imports must use "from").  This script is executed directly (see the
# __main__ guard), so crawler_util is importable as a plain sibling module.
import crawler_util as cu


font_file = './crawler/shixi.ttf'
excel_path = './dataset/{}.xls'
base_url = 'https://www.shixiseng.com/interns?&city={}'
# 直接用ajax的json返回格式
xhr_url = 'https://www.shixiseng.com/app/interns/search/v2?&page={page}&city={city}'
detail_url = 'https://www.shixiseng.com/intern/{}?pcm=pc_SearchList'
cities = ('北京', '上海', '广州', '深圳', 
          '成都', '杭州', '重庆', '武汉', '西安', '苏州', '天津', '南京', 
          '长沙', '海南', '桂林', '柳州')


def save_ttf():
    
    html = cu.get_html(base_url)
    # print(html)
    
    if html:
        
        # random fonts strategy
        rand = re.findall('myFont.*src.*url\(/interns/iconfonts/file\?rand=(.*)\);\}', html)
        # print(rand)
        if rand:
            font_src = 'https://www.shixiseng.com/interns/iconfonts/file?rand=' + rand[0]
            bin_data = requests.get(font_src).content
        else:
            find_base64 = re.compile('base64,(.*)"\)')
            # 拿到base64的编码
            base64_str = re.findall(find_base64, html)
            
            # print(base64_str)
            
            # 解码成二进制stream并保存
            bin_data = base64.b64decode(base64_str[0])
        
        with open(font_file, 'wb') as f:
            f.write(bin_data)
            f.close()
        
        print('----------------------successfully download ttf-------------')
    else:
        print('--------------------Fail to get page-------------------------')


def get_font_dict(font_file):
    
    font = TTFont(font_file)
    # font.saveXML('shixi.xml')  # 用于观察映射
    
    # 会生成10进制code 需转化为16进制
    ccmap = font['cmap'].getBestCmap()
    # print(ccmap)
    new_map = {}
    for k, v in ccmap.items():
        key = hex(k)
        value = v.replace('uni', '')
        a = 'u' + '0' * (4 - len(value)) + value
        new_map[key] = a
        
    # print(new_map)
    new_map.pop('0x78')  # 0x78 - x 无用
    
    # eval直接计算出unicode对应的值
    for k, v in new_map.items():
        new_map[k] = eval('u' + "\'\\" + v + "\'")
        # print('u' + "\'\\" + v + "\'")  # u'\u0054'
    # print(new_map)
    
    # 页面上有特殊字符，替换掉，保存字典
    decode_dict = {}
    for k, v in new_map.items():
        key = k.replace('0x', '&#x')
        decode_dict[key] = v 
    
    return decode_dict
    

# 解码    
def decode_str(s, decode_dict):
    
    for k, v in decode_dict.items():
        if k in s:
            s = s.replace(k, v)
    
    return s
        

# 详情页信息搜集
def get_detail_page(url):
    
    
    html = cu.get_html(url)
    
    if html:
        soup = BeautifulSoup(html, 'html.parser')
        detail = soup.select('.job_detail')
        if detail:
            detail = cu.filter_tags(str(detail[0]))
        else:
            detail = ' '
            
        edu = soup.select('.job_academic')
        if edu:
            edu = edu[0].text
        else:
            edu = ' '
        
        return detail, edu
    
    return ' ', ' '


# 得到某一页面下的所有数据
def get_page(data, data_list, decode_dict):
    
    for item in data:
        
        min_salary = int(decode_str(item['minsal'], decode_dict))
        max_salary = int(decode_str(item['maxsal'], decode_dict))
        day = int(decode_str(item['day'], decode_dict))
        month_num = int(decode_str(item['month_num'], decode_dict))
        
        name = decode_str(item['name'], decode_dict)
        i_tags = decode_str(','.join(item['i_tags']), decode_dict)
        if not i_tags:
            i_tags = ' '
        industry = decode_str(item['industry'], decode_dict)
        cname = decode_str(item['cname'], decode_dict)
        c_tags = decode_str(','.join(item['c_tags']), decode_dict)
        if not c_tags:
            c_tags = ' '
        uuid = item['uuid']
        url = detail_url.format(uuid)
        detail, edu = get_detail_page(url)
        
        job_info = [cname, c_tags, name, i_tags, day, month_num, 
                    edu, industry, min_salary, max_salary, url, detail]
        
        data_list.append(job_info)
    

# 从首页中获取页数，并保存页面信息到data_list中
def get_pgone(data_list, city, decode_dict):
    
    start = 1
    count = 0
    while count < 5:
        url = xhr_url.format(city=city, page=start)
        res = cu.get_json(url)
        if res:
            msg = res['msg']
            pageNum = math.ceil(msg['total'] / msg['pageNumber'])
            data = msg['data']
            get_page(data, data_list, decode_dict)
            return pageNum, start
        else:
            start = start + 1
            count = count + 1
            
    raise Exception('----------------连续5页获取失败，可能被墙了----------------')
    
    
# 存储一个城市的数据到excel中（不太稳定的感觉）
def save_city(city, data_list):
    
    book = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet = book.add_sheet(city, cell_overwrite_ok=True)
    col = ('公司', '公司标签', '职位', '职位标签', 
           '天/周', '时长', '学历', '行业', 
           '最低工资', '最高工资', '链接', '详细描述')
    for i in range(len(col)):
        sheet.write(0, i, col[i])
        
    for i in range(len(data_list)):
        job_info = data_list[i]
        for j in range(len(col)):
            sheet.write(i + 1, j, job_info[j])
    
    book.save(excel_path.format(city))


if __name__ == "__main__":
    
    decode_dict = get_font_dict(font_file)  # 一天变一次？
    
    for city in cities:
        
        data_list = []
        
        # 在第一页里获得pageNum
        # 如果映射改变，能catch到ValueError，重新加载字体文件下载页面
        try:
            pageNum, start = get_pgone(data_list, city, decode_dict)
        except ValueError:
            save_ttf()
            decode_dict = get_font_dict(font_file)
            print('---------------Font changed!-------------------')
            pageNum, start = get_pgone(data_list, city, decode_dict)
            
        print(time.strftime("--------------%Y-%m-%d %H:%M:%S---------------", time.localtime()))
        print("-------------------get pgone of {}--------------------".format(city))
        
        for i in range(start + 1, pageNum + 1):
            
            url = xhr_url.format(city=city, page=i)
            res = cu.get_json(url)
            if res:
                data = res['msg']['data']
                try:
                    get_page(data, data_list, decode_dict)
                except ValueError:
                    save_ttf()
                    decode_dict = get_font_dict(font_file)
                    print('---------------Font changed!-------------------')
                    get_page(data, data_list, decode_dict)
                    
                print(time.strftime("--------------%Y-%m-%d %H:%M:%S---------------", time.localtime()))
                print("-------------------get page{} of {}--------------------".format(i, city))

            else:
                print(time.strftime("--------------%Y-%m-%d %H:%M:%S---------------", time.localtime()))
                print("-------------------Fail to get page{} of {}--------------------".format(i, city))
                
        print(time.strftime("--------------%Y-%m-%d %H:%M:%S---------------", time.localtime()))
        # print("{}: {}".format(city, data_list))
        save_city(city, data_list)
        print("-------------------save city: {}--------------------".format(city))