# -*- coding: utf-8 -*-
"""
Created on 2016/1/10 23:12
@File: spider_lagou.py
@Author: Liangrong Li
@
"""
import json
import urllib
import urllib2
import time
import datetime
from pandas import Series, DataFrame
import pandas as pd
import os
import re


# 把关键词生成URL形式
def kd_encode(keyword):
    """

    :rtype: object
    """
    keywords = keyword.strip(' ')  # 去处关键词的空格
    value = {"kd": keywords.encode('utf-8', 'ignore')}  # 以 encoding 指定的编码格式编码字符串
    kw_data = urllib.urlencode(value)  # 生成URL格式字符串
    return kw_data


# 获取总页数
def get_count(keyword=None):
    """

    :rtype: object
    """
    kd = kd_encode(keyword)
    url = 'http://www.lagou.com/jobs/positionAjax.json?first=true&pn=1&%s' % kd
    try:
        resp = urllib2.urlopen(url)
        result = json.loads(resp.read(), 'utf-8')
        total_page = result['content']['totalPageCount']
        return total_page
    except Exception, e:
        print e


# 获取所有页面的数据，保存到本地
def get_data(keyword=None, dir_name=None):
    """

    :param dir_name:
    :type keyword: object
    """
    total_page = get_count(keyword)
    print u'总页数： ', total_page

    # 建立需要保存数据的目录
    new_folder = 'lagou_' + dir_name
    os.mkdir(new_folder)

    # 开始正式抓取
    for pn in xrange(1, total_page + 1):
        # 构造请求URL
        if pn == 1:
            is_first = 'true'
        else:
            is_first = 'false'
        kw_data = kd_encode(keyword)
        page_num = str(pn)
        url = 'http://www.lagou.com/jobs/positionAjax.json?first=' + is_first + '&pn=%s&%s' % (page_num, kw_data)
        try:
            # print url
            resp = urllib2.urlopen(url)
            data = resp.read()
            with open('lagou_' + dir_name + '/' + page_num + '.json', 'w')as f:
                f.write(data)
            print u'正在写入第%d页,时间是%s，还剩下%d页' % (pn, datetime.datetime.now(), total_page - pn)
        except Exception, e:
            print e
            continue

        # 延时2秒
        time.sleep(2)


def parse_json(keyword=None, dir_name=None, sec_name=5):
    global total_data
    total_page = get_count(keyword)
    for pn in xrange(1, total_page + 1):
        url = "file:///E:/python/python_ex/spider/lagou_%s/%s.json" % (dir_name, str(pn))

        try:
            # 获取html
            resp = urllib2.urlopen(url)
            data = resp.read()
            json_data = json.loads(str(data), encoding='utf-8')['content']['result']

            # 获取每个职位数据
            for key in xrange(len(json_data)):
                # 把company描述的列表合并为一个字符串
                json_data[key]['companyLabelList2'] = '-'.join(json_data[key]['companyLabelList'])
                json_data[key].pop('companyLabelList')  # 删除原职位信息
                json_data[key].pop('adWord')
                json_data[key].pop('adjustScore')
                json_data[key].pop('calcScore')
                json_data[key].pop('companyId')
                json_data[key].pop('companyLabelList2')
                json_data[key].pop('countAdjusted')
                json_data[key].pop('createTimeSort')
                json_data[key].pop('deliverCount')
                json_data[key].pop('flowScore')
                json_data[key].pop('hrScore')
                json_data[key].pop('imstate')
                json_data[key].pop('jobNature')
                json_data[key].pop('leaderName')
                json_data[key].pop('orderBy')
                json_data[key].pop('plus')
                json_data[key].pop('positionId')
                json_data[key].pop('positonTypesMap')
                json_data[key].pop('pvScore')
                json_data[key].pop('randomScore')
                json_data[key].pop('relScore')
                json_data[key].pop('score')
                json_data[key].pop('showOrder')
                json_data[key].pop('searchScore')
                json_data[key].pop('totalCount')
                json_data[key].pop('companyLogo')
                json_data[key].pop('haveDeliver')
                json_data[key].pop('positionFirstType')
                json_data[key].pop('showCount')
                json_data[key].pop('positionAdvantage')
                # print (json_data[key]['industryField'])

                # 清除行业类别中的子类别
                industryField1 = re.match(u'[\u4e00-\u9fa5]+', json_data[key]['industryField'])
                if industryField1:
                    json_data[key]['industryField0'] = industryField1.group()
                else:
                    json_data[key]['industryField0'] = json_data[key]['industryField']
                # print industryField1.group()
                json_data[key].pop('industryField')

                # 最小值salary
                json_data[key]['salarymin'] = re.search('^(\d*?)k', json_data[key]['salary']).group(1)
                # 如果 salary 为8k以上，则 salarymax 为空值
                if re.search('-(\d*?)k$', json_data[key]['salary']) != None:
                    json_data[key]['salarymax'] = re.search('-(\d*?)k$', json_data[key]['salary']).group(1)
                else:
                    json_data[key]['salarymax'] = 0

                salarymax = int(json_data[key]['salarymax'])
                salarymin = int(json_data[key]['salarymin'])

                if json_data[key]['salarymax'] != 0:    # 如3k+
                    salarycha = float(salarymax-salarymin)
                    if salarymax <= sec_name:   # 如，2-3K,sec_num=4
                        salary_xishu = 1.0
                    elif salarymin <= sec_name < salarymax:     # 如，2-3K,sec_num=4
                        salary_xishu = float(salarymin)/sec_name + salarycha/salarymax
                    else:       # 如5-7k,sec_num=4
                        salary_xishu = float(salarymin)/sec_name + salarycha/salarymax
                else:
                    salary_xishu = float(json_data[key]['salarymin'])/sec_name

                # 重新设置区间
                if (salary_xishu-int(salary_xishu)) > 0.2:
                    json_data[key]['salary_sec'] = str(int(salary_xishu)*sec_name+1)+'k-'+str((int(salary_xishu)+1)*sec_name)+'k'
                else:
                    json_data[key]['salary_sec'] = str((int(salary_xishu)-1)*sec_name+1)+'k-'+str(int(salary_xishu)*sec_name)+'k'
                    # unsupport operand type(s) for -: 'unicode' and 'unicode'
                json_data[key]['salary_xishu'] = salary_xishu
                json_data[key].pop('salarymin')
                json_data[key].pop('salarymax')

                # 将网页每一行数据做成Series，之后再合并
                if key == 0:
                    single_data = DataFrame(Series(data=json_data[key])).T
                else:
                    single_data = pd.concat([single_data, DataFrame(Series(data=json_data[key])).T])
        except Exception, e:
            print e
            continue

        # 构造 total_data，是所有页面的集合，single_data 是这一个页面的集合
        if pn == 1:
            total_data = single_data
        else:
            total_data = pd.concat([total_data, single_data])
        total_data.index = range(1, len(total_data) + 1)
        print u'正在解析页面第%d页,时间是%s，还剩下%d页' % (pn, datetime.datetime.now(), total_page - pn)

    try:
        total_data.to_excel('lagou_' + dir_name + '.xls', sheet_name='position')
    except Exception, e:
        print u'第%d页数据写入xls文档失败: ' % pn
        print(e)


if __name__ == '__main__':
    # Salary-coefficient formula used by parse_json, e.g. for 8-15k:
    #   min/sec + (max - min)/max, then bucketed into sec-wide bands.
    # (The redundant local `import time` shadowing the module-level
    # import has been removed.)
    start = time.clock()  # NOTE: wall clock on Windows under Python 2

    sec_name = 3  # salary bucket width in k

    # Search keyword to scrape/parse.
    # kd = u'爬虫'
    kd = u'数据挖掘'
    # Folder suffix where the downloaded pages are stored.
    # dir_name = 'pachong'
    dir_name = 'DataMining'

    # Step 1: scrape the raw pages — uncomment, run once, then re-comment:
    # get_data(kd, dir_name)

    # Step 2: parse the saved pages into an .xls report.
    parse_json(kd, dir_name, sec_name)

    end = time.clock()
    print("run time is %f s" % (end - start))
