# -*- coding: utf-8 -*-

import codecs
import logging
import re
import urllib
import urllib.parse  # explicit: `import urllib` alone does not bind the submodule

import jieba.analyse
import scrapy

from Jobin51Spider.pipelines import job51MySQLPipeline

from ..items import Jobin51SpiderItem


# Search keyword for the crawl.
# NOTE(review): not referenced by the spider below — the start URL
# hard-codes its own query; possibly used elsewhere or leftover.
keyword = "Python"
# URL-encode the keyword so it is safe to embed in a query string.
# NOTE(review): relies on `urllib.parse` being reachable as an attribute of
# `urllib`; a plain `import urllib` does not guarantee that — confirm an
# `import urllib.parse` exists at the top of the file.
keywordcode = urllib.parse.quote(keyword)

# Presumably marks "currently on the first results page"; never read in
# this file — TODO confirm it is used elsewhere before removing.
is_start_page = True

class EveryjobPySpider(scrapy.Spider):
    """Crawl 51job.com job listings posted within the last 24 hours.

    ``parse`` walks the paginated search-result pages and extracts one
    :class:`Jobin51SpiderItem` per job row; each item is then completed by
    following the job's detail page into :meth:`parse2`, which yields it.
    """

    name = 'everyJob.py'
    allowed_domains = ['51job.com']
    start_urls = [
        # Search-result page restricted to jobs posted in the last 24 hours.
        "https://search.51job.com/list/000000,000000,0100,01,0,99,%2B,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare="
    ]

    def parse(self, response):
        """Extract job summary rows from a result page, then paginate.

        Yields one detail-page request (callback ``parse2``) per row that
        has a URL, plus one request for the next result page if present.
        """
        for job_row in response.xpath('//div[@class="el"]'):
            item = Jobin51SpiderItem()

            # NOTE: the trailing space in 't1 ' matches the site's markup.
            position = job_row.xpath('p[@class="t1 "]/span/a/text()').extract()
            if position:
                item['jobPosition'] = position[0].strip()

            job_url = job_row.xpath('p[@class="t1 "]/span/a/@href').extract()
            if job_url:
                item['jobUrl'] = job_url[0].strip()

            company = job_row.xpath('span[@class="t2"]/a/text()').extract()
            if company:
                item['jobCompany'] = company[0].strip()

            area = job_row.xpath('span[@class="t3"]/text()').extract()
            if area:
                item['jobArea'] = area[0].strip()

            # Salary is frequently absent; keep the site's placeholder text.
            salary = job_row.xpath('span[@class="t4"]/text()').extract()
            item['jobSale'] = salary[0].strip() if salary else "暂无"

            post_date = job_row.xpath('span[@class="t5"]/text()').extract()
            if post_date:
                item['jobDate'] = post_date[0].strip()

            if job_url:
                # parse2 fills in the detail fields and yields the item.
                yield response.follow(item['jobUrl'],
                                      meta={'item': item},
                                      callback=self.parse2)
            else:
                self.logger.debug("job row without a detail URL, skipped")

        # The last 'bk' link is the "next page" anchor; the first is "previous".
        next_page = response.xpath('//li[@class="bk"]/a/@href').extract()
        if next_page:
            yield scrapy.Request(response.urljoin(next_page[-1]), self.parse)
        else:
            self.logger.info("no next page link — pagination finished")

    def parse2(self, response):
        """Complete the item from a job detail page and yield it.

        Fills ``jobExperience``, ``jobEducation`` and ``jobKeyword`` on the
        item passed through ``response.meta['item']``.
        """
        item = response.meta['item']
        # NOTE(review): the inner xpaths start with '//' (document-absolute),
        # so every iteration reads the same nodes; in practice this container
        # appears once per page — confirm before relying on multiple matches.
        for section in response.xpath('//div[@class="tCompany_center clearfix"]'):
            msg = section.xpath('//p[@class="msg ltype"]/@title').extract()
            if msg:  # guard BEFORE indexing; empty extract previously crashed
                # Collapse all whitespace, then split on '|' separators:
                # typically "area | experience | education/headcount | ...".
                parts = re.split(r'[|]', "".join(msg[0].split()))
                if len(parts) > 2:
                    item['jobExperience'] = parts[1]
                    # A "招" (recruiting N people) field means the education
                    # slot was skipped, i.e. no education requirement.
                    if "招" not in parts[2]:
                        item['jobEducation'] = parts[2]
                    else:
                        item['jobEducation'] = "无学历要求"

            tag_keywords = section.xpath(
                '//div[@class="mt10"]/p[2]/a[@class="el tdn"]/text()').extract()

            # Supplement the tag keywords with up to 10 distinct English
            # words mined from the free-text job description.  Sorting makes
            # the selection deterministic across runs.
            description = ''.join(section.xpath(
                '//div[@class="bmsg job_msg inbox"]/p/text()').extract())
            english_words = sorted(set(re.findall(r'[a-zA-Z]+', description)))[:10]

            all_keywords = list(tag_keywords) + english_words
            # Joining once avoids the old trailing-comma artifact when one
            # of the two sources was empty.
            item['jobKeyword'] = ','.join(all_keywords)
            if not all_keywords:
                self.logger.debug("no keywords found for %s", response.url)
        yield item  # hand the completed item to the pipelines