# coding=utf-8
import re
import sys;

from scrapy.linkextractors.sgml import SgmlLinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from scrapy_recruit.items import ScrapyRecruitItem

# Python 2 compatibility hack: site.py deletes sys.setdefaultencoding at
# interpreter startup, so reload(sys) is required to get it back.  Forcing
# the default encoding to UTF-8 lets the implicit str<->unicode conversions
# performed by the .decode('utf-8') calls in this spider succeed on
# non-ASCII (Chinese) text.
# NOTE(review): this is a well-known anti-pattern — it masks encoding bugs
# process-wide — and does not exist on Python 3.
reload(sys);
sys.setdefaultencoding("utf-8");

class LiepinSpider(CrawlSpider):
    """Crawl liepin.com search-result pages and scrape individual job postings.

    Legacy Python 2 / Scrapy spider: extracted values are passed through
    ``.decode('utf-8')`` and the module relies on ``sys.setdefaultencoding``
    being set at import time.
    """

    # Spider name used by ``scrapy crawl liepin``.
    name = "liepin"
    # Per-request download delay (seconds) to throttle the crawl.
    download_delay = 3
    # allowed_domains intentionally left unset so followed links are not filtered.
    # allowed_domains = ["news.cnblogs.com"]

    # URL-encoded (UTF-8) search keywords mapped to the number of result
    # pages to enumerate for each keyword:
    #   %E6%9E%B6%E6%9E%84%E5%B8%88          = "architect"
    #   %E6%8A%80%E6%9C%AF%E7%BB%8F%E7%90%86 = "technical manager"
    #   %E6%8A%80%E6%9C%AF%E4%B8%93%E5%AE%B6 = "technical expert"
    #   CTO                                  = CTO
    # Industry codes in the ``industries=`` filter:
    #   040 = internet/e-commerce, 420 = online games,
    #   010 = computer software,   030 = IT services
    key_map = {
        '%E6%9E%B6%E6%9E%84%E5%B8%88': 100,  # architect
        '%E6%8A%80%E6%9C%AF%E7%BB%8F%E7%90%86': 46,  # technical manager
        'CTO': 46,
        '%E6%8A%80%E6%9C%AF%E4%B8%93%E5%AE%B6': 30  # technical expert
    }
    base_url = "https://www.liepin.com/zhaopin/?industries=040%2C420%2C010%2C030"
    # One search-result URL per (keyword, page-index) pair.
    start_urls = [base_url + ("&curPage=%d&fromSearchBtn=%d&key=%s" % (i, i + 1, key)) for key in key_map.keys() for i
                  in range(0, key_map[key])]

    # Follow links that look like individual job postings and parse each one.
    rules = (
        Rule(SgmlLinkExtractor(allow=(r'https://www.liepin.com/job/\d+',)), callback='parse_content'),
    )

    # Patterns used by format_content, precompiled once instead of per call.
    _whitespace_re = re.compile(r'\s', re.S)
    _tag_re = re.compile(r'<[^>]+>', re.S)

    @staticmethod
    def format_content(text):
        """Normalize an HTML fragment to plain text.

        Step order matters:
          1. strip every whitespace character,
          2. turn ``<br>`` into CRLF line breaks,
          3. replace double quotes with single quotes,
          4. replace every remaining HTML tag with a single space.
        """
        text = LiepinSpider._whitespace_re.sub('', text)
        text = text.replace('<br>', '\r\n')  # restore line breaks removed in step 1
        text = text.replace('"', "'")
        return LiepinSpider._tag_re.sub(' ', text)

    @staticmethod
    def _first_text(response, query):
        """Return the first XPath match decoded to unicode, or '' when absent.

        Runs the query once (the original code evaluated each XPath twice:
        once for the length guard and once for the extraction).
        """
        nodes = response.selector.xpath(query)
        if len(nodes) > 0:
            return nodes[0].extract().decode('utf-8')
        return ''

    # Parse one job-posting page into a ScrapyRecruitItem.
    def parse_content(self, response):
        """Extract the job fields from a liepin.com posting page and yield the item."""
        item = ScrapyRecruitItem()

        # Referrer (the search-result page that linked here).  .get() avoids
        # the KeyError the old [] lookup raised when no Referer was sent.
        item['job_url_referer'] = response.request.headers.get('Referer', '').strip()

        # Canonical URL of the posting itself.
        item['job_url'] = response.url.strip()
        self.log("job url is %s ..." % response.url)

        item['job_title'] = self._first_text(
            response, '//div[@class="title-info"]/h1/text()').strip()
        self.log("job title is %s ..." % item['job_title'])

        item['job_company'] = self._first_text(
            response, '//div[@class="title-info"]/h3/a/text()').strip()
        self.log("job company is %s ..." % item['job_company'])

        item['job_location'] = self._first_text(
            response, '//p[@class="basic-infor"]/span/a/text()').strip()
        self.log("job location is %s ..." % item['job_location'])

        item['job_post_time'] = self._first_text(
            response, '//p[@class="basic-infor"]/time/@title').strip()
        self.log("job post time is %s ..." % item['job_post_time'])

        # Whole-element extractions are run through format_content to strip
        # markup; format_content('') is '' so missing nodes still yield ''.
        item['job_features'] = LiepinSpider.format_content(
            self._first_text(response, '//div[@class="job-qualifications"]')).strip()
        self.log("job features is %s ..." % item['job_features'])

        item['job_tags'] = LiepinSpider.format_content(
            self._first_text(response, '//div[@class="tag-list"]')).strip()
        self.log("job tags is %s ..." % item['job_tags'])

        item['job_pay'] = self._first_text(
            response, '//p[@class="job-item-title"]/text()').strip()
        self.log("pay is %s ..." % item['job_pay'])

        # NOTE: the title/content XPaths below start with '//', which Scrapy
        # evaluates against the WHOLE document rather than relative to
        # `content`; the loop index i therefore pairs the i-th title and
        # body with the i-th "job-item main-message" section.  Preserved
        # as-is — switching to './/' would change which nodes are selected.
        content_all = ""
        contents = response.selector.xpath('//div[contains(@class, "job-item main-message")]')
        for i, content in enumerate(contents):
            c_title = content.xpath('//h3[@class="job-title"]/text()')[i].extract().decode('utf-8')
            c_content = content.xpath(
                '//div[contains(@class, "content") or contains(@class, "info-word")]')[i].extract().decode('utf-8')
            c_all = LiepinSpider.format_content(c_title + '\n' + c_content + '\n')
            self.log("content is %s ..." % c_all)  # was a bare print(); log like the rest
            content_all += c_all

        item['job_content'] = content_all

        yield item

### liepin with login method...
    # def start_requests(self):
    #     print("start request...");
    #     return [Request('https://www.liepin.com/',
    #                     meta={'cookiejar': 1},
    #             callback=self.post_login)]
    #     pass

    # def after_login(self, response):
    #     print('in the after login phase...')
    #     for url in self.start_urls:
    #         print(url);
    #         yield self.make_requests_from_url(url)
    #
    # def make_requests_from_url(self, url):
    #     print('in the make_requests_from_url phase...')
    #     html = urllib2.urlopen(url).read();
    #     return Request(url, meta={'cookiejar': 1}, callback='test');
    #     pass
    #
    # def test(self, response):
    #     print('in the test phose...')
    #     print(response);
    #     pass
    # def post_login(self, response):
    #     print 'Preparing login====', response.url
    #     # s = 'index_nav'
    #     # html = urllib2.urlopen(response.url).read()
    #     return [FormRequest.from_response(response,
    #                                       meta={'cookiejar': response.meta['cookiejar']},
    #                                       formdata={
    #                                           'layer_from': 'wwwindex_rightbox_new',
    #                                           # 'source': s,
    #                                           'user_pwd': '48fceefcf84be753a21cc90119b58295',
    #                                           'user_login': '13047878949',
    #                                           'chk_remember_pwd': 'on'
    #                                       },
    #                                       callback=self.after_login,
    #                                       dont_filter=True)
    #             ]
    #     # print 'htnl:', html
    #     # 验证码图片地址
    #     # imgurl = re.search('<img id="captcha_image" src="(.+?)" alt="captcha" class="captcha_image"/>', html)
    #     # if imgurl:
    #     #     url = imgurl.group(1)
    #     #     # 将图片保存至同目录下
    #     #     res = urllib.urlretrieve(url, 'v.jpg')
    #     #     # 获取captcha-id参数
    #     #     captcha = re.search('<input type="hidden" name="captcha-id" value="(.+?)"/>', html)
    #     #     if captcha:
    #     #         vcode = raw_input('请输入图片上的验证码：')
    #     #         return [FormRequest.from_response(response,
    #     #                                           meta={'cookiejar': response.meta['cookiejar']},
    #     #                                           formdata={
    #     #                                               'source': 'index_nav',
    #     #                                               # 'source': s,
    #     #                                               'form_email': 'your_email',
    #     #                                               'form_password': 'your_password',
    #     #                                               'captcha-solution': vcode,
    #     #                                               'captcha-id': captcha.group(1),
    #     #                                               'user_login': '登录'
    #     #                                           },
    #     #                                           callback=self.after_login,
    #     #                                           dont_filter=True)
    #     #                 ]
    #
    # pass