# -*- coding: utf-8 -*-
"""
Created on 2016/1/10 23:12
@File: spider_lagou.py
@Author: Liangrong Li
"""
import datetime
import json
import os
import random
import threading
import time
import urllib
import urllib2
from Queue import Queue

import pandas as pd
from pandas import Series, DataFrame

# 获取所有页面的数据，保存到本地
class CrawlLagou:
    def __init__(self, keyword=None, dir_name=None):
        q = Queue()
        self.keyword = keyword
        self.dir_name = dir_name
        self.lagou_folder = 'lagou_' + dir_name   # 需要保存数据的目录
        self.user_agent = ['Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999',
                           'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',
                           'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
                           'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
                           'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
                           'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)',
                           'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
                           'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
                           'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',
                           'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',
                           'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+',
                           'Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)',
                           'Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0',
                           'Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
                           'Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13',
                           'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
                           'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
                           'Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124',
                           'MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
                           'NOKIA5700/ UCWEB7.0.2.37/28/999',
                           'Openwave/ UCWEB7.0.2.37/28/999',
                           'Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10',
                           'UCWEB7.0.2.37/28/999',
                           'User-Agent:Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
                           'User-Agent:Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
                           'User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;',
                           'User-Agent:Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
                           'User-Agent:Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
                           'User-Agent:Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
                           'User-Agent:Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
                           'User-Agent:Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
                           'User-Agent:Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
                           'User-Agent:Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
                           'User-Agent:Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11']
        # 判断保存下载数据的目录是否存在，不存在就创建
        if not os.path.exists(self.lagou_folder):
            os.makedirs(self.lagou_folder)

    # 把关键词生成URL形式
    def kd_encode(self, keyword):
        """

        :rtype: object
        """
        keywords = keyword.strip(' ')  # 去处关键词的空格
        value = {"kd": keywords.encode('utf-8', 'ignore')}  # 以 encoding 指定的编码格式编码字符串
        kw_data = urllib.urlencode(value)  # 生成URL格式字符串
        return kw_data

    # 获取第一页并解析总页数
    def crawl_count(self, keyword=None):
        """

        :rtype: object
        """
        kd = self.kd_encode(keyword)
        url = 'http://www.lagou.com/jobs/positionAjax.json?first=true&pn=1&%s' % kd

        try:
            resp = urllib2.urlopen(url, timeout=10)
            data = resp.read()
            # page1 写入文件
            with open(self.lagou_folder+ '/1.json', 'w')as f:
                f.write(data)

            result = json.loads(data, 'utf-8')
            total_page = result['content']['totalPageCount']
            print u'总页数： ', total_page
            print u'正在写入第1页,时间是%s，还剩下%d页' % (datetime.datetime.now(), total_page - 1)
            return total_page
        except Exception, e:
            print e

    def crawl_data(self, url, total_page, pn):
        global mutex

        try:
            threadname = threading.current_thread().getName()
            header = {
                'User-Agent': random.choice(self.user_agent)
            }

            # 取得锁
            # mutex.acquire()
            req = urllib2.Request(url, headers=header)
            resp = urllib2.urlopen(req, timeout=10)
            data = resp.read()
            with open(self.lagou_folder + '/' + str(pn) + '.json', 'w')as f:
                f.write(data)
            print threadname, u': 正在写入第%d页,时间是%s，还剩下%d页' % (pn, datetime.datetime.now(), total_page - pn), '\n'

            # 释放锁
            # mutex.release()

        except Exception, e:
            print e

    def start_crawl(self, keyword=None, thread_num=None):
        global mutex
        total_page = self.crawl_count(keyword)
        kw_data = self.kd_encode(keyword)
        # 开始正式抓取
        threads = []

        # 创建一个锁
        mutex = threading.Lock()
        # 先创建线程对象
        for pn in xrange(2, total_page + 1):
            page_num = str(pn)
            url = 'http://www.lagou.com/jobs/positionAjax.json?first=false&pn=%s&%s' % (page_num, kw_data)
            # 调用重复功能crawl_data实现多线程
            threads.append(threading.Thread(target=self.crawl_data, args=(url, total_page, pn)))

        # 启动所有线程
        for t in threads:
            t.start()
        # 主线程中等待所有子线程退出
        for t in threads:
            t.join()

    def parse_json(self, url, pn):
        global t_data
        try:
            # 获取html
            resp = urllib2.urlopen(url, timeout=10)
            data = resp.read()
            json_data = json.loads(str(data), encoding='utf-8')['content']['result']

            # 获取每个职位数据
            for key in xrange(len(json_data)):
                # 把company描述的列表合并为一个字符串
                json_data[key]['companyLabelList2'] = '-'.join(json_data[key]['companyLabelList'])
                json_data[key].pop('companyLabelList')  # 删除原职位信息

                # 将网页每一行数据做成Series，之后再合并
                if key == 0:
                    single_data = DataFrame(Series(data=json_data[key])).T
                else:
                    single_data = pd.concat([single_data, DataFrame(Series(data=json_data[key])).T])
        except Exception, e:
            print e
        print threading.current_thread().name, '|', u'正在解析页面第%d页,时间是%s' % (pn, datetime.datetime.now())
        # 构造 total_data，是所有页面的集合，single_data 是这一个页面的集合
        if pn == 1:
            t_data = single_data
        else:
            t_data = pd.concat([t_data, single_data])
        t_data.index = range(1, len(t_data) + 1)

    def start_parse(self, keyword=None, tn=10):
        global total_data
        total_page = self.crawl_count(keyword)
        threads = []
        for pn in xrange(1, total_page + 1):
            url = "file:///D:/python/python_ex/spider/%s/%s.json" % (self.lagou_folder, str(pn))

            # 调用重复功能crawl_data实现多线程
            t = threading.Thread(target=self.parse_json(url, pn))
            t.start()
            threads.append(t)
        for i in threads:
            i.join()
            print threading.current_thread().name, '|', u'正在解析页面第%d页,时间是%s，还剩下%d页' % (pn, datetime.datetime.now(), total_page - pn)
        try:
            total_data.to_excel('lagou_' + dir_name + '.xls', sheet_name='position')
        except Exception, e:
            # print u'第%d页数据写入xls文档失败: ' % pn
            print(e)

    def threads(self, keyword=None):
        """
        :param keyword:
        :return:threads
        """
        total_page = self.crawl_count(keyword)
        kw_data = self.kd_encode(keyword)
        threads = []
        for pn in xrange(2, total_page+1):
            page_num = str(pn)
            url = 'http://www.lagou.com/jobs/positionAjax.json?first=false&pn=%s&%s' % (page_num, kw_data)
            t = threading.Thread(target=self.crawl_data, args=(url, total_page, pn))
            threads.append(t)
        return threads

    def start(self, threads, thread_num, second):
        # for i in thread_num:
        #     i.start()
        # for t in thread_num:
        #     t.join
        l = len(threads)
        n = thread_num
        while l > 0:
            if l > thread_num:
                n1 = threads[:thread_num]
                threads = threads[thread_num:]
                for t in n1:
                    t.start()
                time.sleep(1)
                for t in n1():
                    t.join()
                n += thread_num
                l = len(thread_num)
                continue
            elif l < thread_num:
                n1 = threads
                for t in n1:
                    t.start()
                for t in n1:
                    t.join()
                l = 0
if __name__ == '__main__':
    import time
    start = time.clock()

    # 设置关键词
    keyword = u'爬虫'
    # 设定保存下载的数据文件夹名
    dir_name = 'pachong'
    thread_num = 10
    delay = 1
    # 实例化CrawlLagou爬虫类
    spider = CrawlLagou(keyword , dir_name)
    # 1.启动抓取数据,设置完成后，定位到文件目录，在命令终端下使用 python spider_lagou.py抓取数据
    # spider.start_crawl(keyword, thread_num)
    thread_list = spider.threads(keyword)
    start(thread_list, thread_num, delay)


    # 2.解析数据|在1完成后注释掉其代码，开启parse_json(kd)解析数据
    # spider.start_parse(dir_name, thread_num)

    end = time.clock()
    print "run time is %f s" % (end - start)
