import os
import re
import json
import random
import pymongo
import pandas as pd
import lxml.etree as le
import urllib.request as ur


# Show up to 60 columns when printing DataFrames during debugging.
pd.set_option('display.max_columns', 60)
# NOTE(review): os.path.dirname('.') is always '' — abspath('') resolves to the
# current working directory, so run_dir is simply the CWD and the chdir below
# is effectively a no-op. Presumably intended to anchor relative output paths
# (see the 'Data/' directory used by spider_job) — confirm before changing.
d = os.path.dirname('.')
run_dir = os.path.abspath(d)  # absolute path of the current working directory
os.chdir(run_dir)


def get_user_agent(method='pc'):
    """
    Return a random User-Agent header string for request spoofing.

    :param method: client type — 'pc' for desktop browsers, 'phone' for mobile.
    :return: a randomly chosen User-Agent string of the requested type.
    :raises TypeError: if method is neither 'pc' nor 'phone'.
    """
    # NOTE: the original strings were corrupted by a stray ".html" find/replace
    # ("Mozilla/5.0.html", "Chrome/39.0.html.2171.71", ...), which produced
    # malformed User-Agent values; restored to the canonical forms below.
    # Desktop User-Agents
    user_agent_pc = [
        # Chrome
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
        # Firefox
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
        'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
        # Opera
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
        # QQ Browser
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)',
        # Sogou Browser
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
        # 360 Browser
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
        # UC Browser
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.122 UBrowser/4.0.3214.0 Safari/537.36',
    ]
    # Mobile User-Agents
    user_agent_phone = [
        # iPhone
        'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
        # iPad
        'Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5',
        'Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
        # Android
        'Mozilla/5.0 (Linux; U; Android 2.2.1; zh-cn; HTC_Wildfire_A3333 Build/FRG83D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
        'Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
        # QQ Browser for Android
        'MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
        # Android Opera Mobile
        'Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10',
        # Android Pad (Moto Xoom)
        'Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13',
    ]
    if method == 'pc':
        return random.choice(user_agent_pc)
    elif method == 'phone':
        return random.choice(user_agent_phone)
    else:
        raise TypeError('Abnormal format: method')


def html_get(url, charset):
    """
    Download a web page and decode it to text.

    :param url: page URL to fetch.
    :param charset: character encoding used to decode the response body
        (undecodable bytes are dropped via errors='ignore').
    :return: decoded page source as a str.
    """
    # Spoof a random desktop browser so the request is not trivially blocked.
    headers = {'User-Agent': get_user_agent(method='pc')}
    req = ur.Request(url=url, headers=headers)
    raw = ur.urlopen(req).read()
    return raw.decode(charset, 'ignore')


def spider_xpath(html, pattern, output_type, split='', default=None):
    """
    Run an XPath expression against an HTML document.

    :param html: page source as a string.
    :param pattern: XPath expression to evaluate.
    :param output_type: 'list' to return raw matches, 'str' to join them.
    :param split: separator used between matches when output_type is 'str'.
    :param default: value returned when nothing (non-empty) was matched.
    :return: list of matches, joined string, or `default` when empty.
    :raises TypeError: if output_type is neither 'list' nor 'str'.
    """
    tree = le.HTML(html)
    if output_type == 'list':
        matches = tree.xpath(pattern)
        return matches or default
    if output_type == 'str':
        joined = split.join(node.strip() for node in tree.xpath(pattern))
        return joined or default
    raise TypeError('Abnormal format: output_type')


# 获取第二层网页内数据
def data_get_level_two(url):
    html = html_get(url=url, charset='gbk')
    job_detail = spider_xpath(html=html, output_type='str', pattern='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][1]//text()')
    job_connect = spider_xpath(html=html, output_type='str', pattern='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][2]//text()')
    job_company = spider_xpath(html=html, output_type='str', pattern='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][3]//text()')
    data = dict(
        job_detail_fromleveltwo=job_detail,
        job_conn_fromleveltwo=job_connect,
        job_company_fromleveltwo=job_company,
    )  # 返回字典格式数据
    return data


# Scrape one search-result (first-level) page.
def data_get_level_one(keyword, page, write_method):
    """
    Scrape one 51job search-result page, enrich each job with its detail
    page, and persist the records.

    :param keyword: search keyword (e.g. 'python').
    :param page: 1-based result page number.
    :param write_method: 'mongo' to insert into MongoDB db_job.51job
        (returns None), 'excel' to return a pandas DataFrame.
    :return: DataFrame for 'excel', None for 'mongo'.
    :raises TypeError: if write_method is neither 'mongo' nor 'excel'.
    """
    # Validate up front so we don't fetch pages for an unusable write_method.
    if write_method not in ('mongo', 'excel'):
        raise TypeError('write_method无效')
    url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,{keyword},2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='.format(
        keyword=keyword,
        page=page
    )
    html = html_get(url=url, charset='gbk')  # fetch page source
    # The result list is embedded as a JSON blob in an inline script tag.
    # Raw string with escaped dots — the original pattern's bare '.' matched
    # any character.
    re_result = re.findall(r'window\.__SEARCH_RESULT__ = (.*?)</script>', html)
    datas_level_one = json.loads(re_result[0])['engine_search_result']
    # Enrich every first-level record with the second-level detail fields.
    for data_level_one in datas_level_one:
        job_href = data_level_one['job_href']  # detail-page URL
        data_level_one.update(data_get_level_two(url=job_href))
    if write_method == 'mongo':
        client = pymongo.MongoClient()
        collection = client.get_database('db_job').get_collection('51job')
        # Collection.insert() was removed in pymongo 4; batch-insert instead
        # of one insert per record.
        collection.insert_many(datas_level_one)
        return None
    # write_method == 'excel'
    return pd.DataFrame.from_dict(datas_level_one, orient='columns')


def spider_job(keyword, start_page, end_page, write_method):
    """
    Crawl a range of 51job search-result pages for a keyword.

    :param keyword: search keyword (e.g. 'python').
    :param start_page: first page to crawl (inclusive).
    :param end_page: last page to crawl (inclusive).
    :param write_method: 'mongo' to store in MongoDB, 'excel' to write one
        xlsx file per page under <run_dir>/Data/.
    :raises TypeError: if write_method is neither 'mongo' nor 'excel'.
    """
    if write_method not in ('mongo', 'excel'):
        raise TypeError('write_method无效')
    if write_method == 'excel':
        # The original code crashed with FileNotFoundError when Data/ was
        # missing; create it up front.
        os.makedirs(os.path.join(run_dir, 'Data'), exist_ok=True)
    for i in range(start_page, end_page + 1):  # crawl each page in the range
        print('第 {} 页爬取中...'.format(i))
        result = data_get_level_one(keyword=keyword, page=i, write_method=write_method)
        if write_method == 'excel':
            # Fix: the original passed encoding='uft-8' (typo), and
            # DataFrame.to_excel dropped the `encoding` kwarg in pandas 2.0;
            # xlsx is UTF-8 natively, so no encoding argument is needed.
            result.to_excel('{}/Data/spider_job_info{}.xlsx'.format(run_dir, i))
        print('第 {} 页爬虫成功'.format(i))
    print('爬虫完成')


if __name__ == "__main__":
    # Crawl Python job postings and write them to the MongoDB database
    spider_job(keyword='python', start_page=1, end_page=5, write_method='mongo')
    # Crawl Java job postings and write them to the MongoDB database
    spider_job(keyword='java', start_page=1, end_page=5, write_method='mongo')




