import scrapy
from scrapy import Request
from utils.job51strUtil import *
from utils.dbUtils import DataBase

class contentSpider(scrapy.Spider):
    """Crawl 51job "java" search results and persist companies and job records.

    Flow: search page -> parse (one request per listing row)
          -> parse_company (upsert company, chain to job page)
          -> parse_job (insert job record).
    """
    name = "51jobSpider"
    # allowed_domains = ["jobs.51job.com"]
    # Search-result URL template; {page} is the 1-based result-page number.
    job_url='https://search.51job.com/list/000000,000000,0000,00,9,99,java,2,{page}.html?ord_field=1'

    company_sql = 'insert into 51job_company (COMPANY_NAME,CITY,AREA,ADDRESS,NATURE,SCALE,BUSINESS,COMPANY_URL,CREATE_TIME)' \
                    ' values (%s,%s,%s,%s,%s,%s,%s,%s,now())'

    # NOTE(review): this dedup lookup reads table `company` / column COMPANY_51URL,
    # while the insert above writes `51job_company` / COMPANY_URL. If that is not
    # intentional the lookup never matches and companies are re-inserted on every
    # crawl — confirm against the actual schema.
    company_search_sql = 'select id from  company where COMPANY_NAME=%s and COMPANY_51URL=%s '

    jobrecord_sql = 'insert into 51job_record(COMPANY_ID,JOB_NAME,MIN_SALARY,MAX_SALARY,JOB_EXPERIENCE,EDUCATION,RECRUITING_NUMBERS,JOB_INFORMATION,RELEASE_TIME,CREATE_TIME)'\
                    'values (%s,%s,%s,%s,%s,%s,%s,%s,%s,now())'
    # Shared DB handle for all callbacks (DataBase is a project-local helper).
    db = DataBase()

    def start_requests(self):
        """Seed the crawl with the first search-result page."""
        # TODO: meta['page'] is carried but never incremented — only page 1 is
        # ever crawled. Pagination would go in parse().
        yield Request(self.job_url.format(page=1), callback=self.parse,
                      meta={'page': 1}, dont_filter=True)

    def parse(self, response):
        """Parse one search-result page and yield a company request per row."""
        for row in response.xpath('//div[@id="resultList"]/div[@class="el"]'):
            meta = {
                'jobname': row.xpath('p/span/a/@title').extract_first(),
                'company_name': row.xpath('span[@class="t2"]/a/@title').extract_first(),
                'area': row.xpath('span[@class="t3"]/text()').extract_first(),
                'salary': row.xpath('span[@class="t4"]/text()').extract_first(),
                'logdate': row.xpath('span[@class="t5"]/text()').extract_first(),
                'company_url': row.xpath('span[@class="t2"]/a/@href').extract_first(),
                'job_url': row.xpath('p/span/a/@href').extract_first(),
            }
            # Skip malformed rows instead of aborting the whole page
            # (extract()[0] previously raised IndexError on any missing node).
            if not meta['company_url'] or not meta['job_url']:
                continue
            yield Request(url=meta['company_url'], callback=self.parse_company,
                          meta=meta, dont_filter=True)

    def parse_company(self, response):
        """Parse a company page, upsert the company row, chain to the job page."""
        meta = response.meta
        company_name = meta.get('company_name')
        company_url = meta.get('company_url')

        # Best effort: either field may be absent from the page; the *_deal
        # helpers receive '' in that case (same fallback as before).
        address_html = response.xpath('//p[@class="fp"]').extract_first() or ''
        introduction = response.xpath('//p[@class="ltype"]/@title').extract_first() or ''

        # Column order must match company_sql:
        # NAME, CITY, AREA, ADDRESS, NATURE, SCALE, BUSINESS, URL
        company_row = [company_name]
        company_row += area_deal(meta.get('area'))
        company_row.append(companyAdress_deal(address_html))
        company_row += introduction_deal(introduction)
        company_row.append(company_url)

        # Reuse the stored id when the company was seen before; insert otherwise.
        record = self.db.select_one(self.company_search_sql,
                                    (company_name, company_url))
        if record is None:
            company_id = self.db.insert_one(self.company_sql, tuple(company_row))
        else:
            company_id = record[0]
        meta['company_id'] = company_id
        yield Request(url=meta.get('job_url'), callback=self.parse_job,
                      meta=meta, dont_filter=True)

    def parse_job(self, response):
        """Parse a job-detail page and insert one 51job_record row."""
        meta = response.meta

        # Best effort, '' when the node is missing (same fallback as before).
        requirement = response.xpath('//p[@class="msg ltype"]/@title').extract_first() or ''
        information = response.xpath('//div[@class="bmsg job_msg inbox"]').extract_first() or ''

        # Column order must match jobrecord_sql:
        # COMPANY_ID, JOB_NAME, MIN/MAX_SALARY, EXPERIENCE, EDUCATION,
        # RECRUITING_NUMBERS, JOB_INFORMATION, RELEASE_TIME
        job_row = [meta.get('company_id'), meta.get('jobname')]
        job_row += salary_deal(meta.get('salary'))
        job_row += requirement_deal(requirement)
        job_row.append(information)
        job_row.append(meta.get('logdate'))
        self.db.insert_one(self.jobrecord_sql, tuple(job_row))