# -*- coding:utf-8 -*-
import requests
import User_Agent_list
import random
import jsonpath
import csv
import time
import sys
import re
import os
from lxml import etree

# Python 2-only hack: reload() restores sys.setdefaultencoding (which the
# interpreter deletes at startup) so implicit str<->unicode conversions use
# GBK (Chinese Windows console/file names) instead of ASCII. This affects
# every implicit encode/decode in the whole script.
reload(sys)
sys.setdefaultencoding("gbk")


class LaGou(object):
    def __init__(self, job_name='', job_location='', job_page=1):
        self.ajax_url = "https://www.lagou.com/jobs/positionAjax.json"
        self.base_url = "https://www.lagou.com/jobs/"
        self.job_location = job_location
        # self.html_url = "https://www.lagou.com/jobs/list_%E7%88%AC%E8%99%AB?labelWords=&fromSearch=true&suginput="
        self.job_page = job_page
        self.job_name = job_name
        self.headers = {
            'Accept': "application/json, text/javascript, */*; q=0.01",
            'Accept-Encoding': "gzip, deflate, br",
            'Accept-Language': "zh-CN,zh;q=0.9",
            'Connection': "keep-alive",
            'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
            'Host': "www.lagou.com",
            'Origin': "https//www.lagou.com",
            'Referer': "https//www.lagou.com/jobs/list_%E7%88%AC%E8%99%AB?labelWords=&fromSearch=true&suginput=",
            'User-Agent': random.choice(User_Agent_list.USER_AGENT_LIST),
            'X-Anit-Forge-Code': "0",
            'X-Anit-Forge-Token': "None",
            'x-Requested-With': "XMLHttpRequest"
        }
        self.proxies = {}
        self.result = []

    def spider_job(self):
        querystring = {"px": "default", "city": self.job_location, "needAddtionalResult": "false",
                       "isSchoolJob": "0"}
        for i in range(1, self.job_page + 1):
            payload = {"first": False, "pn": i, "kd": self.job_name}

            if i == 1:
                payload["first"] = True
            response = requests.post(self.ajax_url, data=payload, headers=self.headers, params=querystring,
                                     proxies=self.proxies)
            response_json = response.json()
            results_list = jsonpath.jsonpath(response_json, "$..result")[0]
            total_count = int(jsonpath.jsonpath(response_json, "$..totalCount")[0])
            s = lambda total_count: total_count // 15 if total_count % 15 == 0 else (total_count) // 15 + 1
            max_page = s(total_count)
            if i > max_page:
                self.job_page = '共%d' % max_page
                print '所有页爬取完毕'
                break
            for results in results_list:
                item = {}
                item["companyFullName"] = results["companyFullName"]
                item["positionName"] = results["positionName"]
                item["createTime"] = results["createTime"]
                item["city"] = results["city"]
                item["district"] = results["district"]
                item["salary"] = results["salary"]
                item["positionAdvantage"] = results["positionAdvantage"]
                item["workYear"] = results["workYear"]
                item["education"] = results["education"]
                item["jobNature"] = results["jobNature"]
                item["positionId"] = results["positionId"]
                self.result.append(item)
            print '第%d页爬取完成' % i
            time.sleep(random.randint(2, 5))

    def write_csv(self):
        title = self.result[0].keys()
        contents = [v.values() for v in self.result]
        # filename = ""
        file_name = self.job_name + "_" + self.job_location + "_" + str(self.job_page) + '页' + "_" + time.strftime(
            '%Y%m%d_%H%M%S', time.localtime(time.time())) + '.csv'
        save_name = file_name.decode('utf-8').encode('gbk')
        csv_file = file("./lagou_data/" + save_name, "w")
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(title)
        print contents
        contents = str(contents).replace(u'\\u200b', '')
        contents = eval(contents)

        csv_writer.writerows(contents)
        csv_file.close()

    def spider_job_description(self, file_name):
        # 从文件中读取数据
        read_file_csv = csv.reader(open("./lagou_data/" + file_name, "r"))
        description_list = []
        txt_file = file("./lagou_data/description_" + file_name[:-3] + '.txt', "a")
        url_txt_file = file("./lagou_data/url_description_" + file_name[:-3] + '.txt', "a")
        for i in read_file_csv:
            new_url = self.base_url + str(i[3]) + ".html"
            print new_url
            response = requests.get(new_url, headers=self.headers)
            d_html = response.content
            d_xml = etree.HTML(d_html)
            description = d_xml.xpath("//dd[@class='job_bt']/div")
            try:
                description = description[0].xpath('string(.)')
                # print description[0].xpath('string(.)')
                p = re.compile(r'<.*?>|&.*?;|\s|\t|\n|\r|&nbsp;| ')
                s = p.sub('', description).replace(u'\xa0', '')
                # print s.encode('utf-8')
                description_list.append(s)
                print s
                txt_file.write(str(s) + '\n')
                url_txt_file.write(new_url + str(s) + '\n')
                print '写入完成！'
            except Exception as e:
                print e
            time.sleep(0.8)
        txt_file.close()
        url_txt_file.close()


if __name__ == '__main__':
    s = raw_input("执行操作（1.抓职位 2.收集描述）：")
    if s == '1':
        job_name = raw_input("查询职位名称：")
        job_location = raw_input("查询地点：")
        job_page = int(raw_input("查询页数："))
        l = LaGou(job_name, job_location, job_page)
        l.spider_job()
        l.write_csv()
    elif s == '2':
        a = os.listdir('./lagou_data')
        for i in range(1, len(a) + 1):
            print str(i) + '.' + a[i - 1].encode('utf-8')
        file_num = int(raw_input('输入要收集的文件序号'))
        file_name = a[file_num - 1]
        l = LaGou()
        l.spider_job_description(file_name)
    else:
        print "参数错误"
