# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from  spider.common.job51 import  Job51,URLResponse
from  spider.common.zhaopin import  ZhaoPin
from  spider.models import  Company,Job
import time
from django.db.models import Q 
from bs4 import BeautifulSoup
import time
import re

class Command(BaseCommand):
    """Crawl job and company data from zhaopin.com and 51job.com.

    ``handle`` is the management-command entry point; it processes every
    ``Job`` row that has not been crawled yet.  ``runUrls`` and
    ``runCompany`` are alternative passes invoked by editing ``handle``.
    """

    # Search-result URL templates keyed by site.
    # 'zhaopin': %s placeholders are (city, keyword, page number).
    # '51jobs':  the single %s placeholder is the keyword.
    webs = {
        'zhaopin' : "http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%s&kw=%s&sm=0&p=%s&isfilter=0&fl=489&isadv=0&sg=66ca6e946598425c88ba7eadad41a314",
        #'51jobs' :"http://search.51job.com/jobsearch/search_result.php?fromJs=1&keyword=%s&keywordtype=2&lang=c&stype=2&postchannel=0000&fromType=1"
        #'zhaopin' : "http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%s&kw=%s&p=1&kt=2&isadv=0",
        '51jobs' : "https://search.51job.com/list/000000,000000,0000,00,9,99,%s,2,1.html",
    }

    def runUrls(self):
        """Read crawl tasks from a CSV file and enqueue search-result URLs.

        Each CSV line is expected to be ``city,site,keyword`` where *site*
        is the Chinese name of the job board ('智联招聘' or '前程无忧').
        NOTE(review): the CSV path is hard-coded to a local Windows path.
        """
        lists = self.read_log('D:/will/djgo/20180708fc02.csv')

        for ls in lists:
            try:
                d = ls.split(',')
                print(d)
                # Guard against an empty field: '' is a substring of every
                # string, so a blank d[1] would otherwise mis-route the row
                # into the zhaopin branch.
                if d[1] and d[1] in '智联招聘':
                    url = (self.webs['zhaopin'] % (d[0], d[2], 1))
                    ZhaoPin().urls(url, d)
                    print(url)
                elif d[1] and d[1] in '前程无忧':
                    url = (self.webs['51jobs'] % (d[2]))
                    print(url)
                    Job51().urls(url, d)
                # Throttle between requests to avoid hammering the sites.
                time.sleep(8)
            except Exception as e:
                # Log the failing row and back off, instead of the original
                # bare except that silently swallowed everything (including
                # KeyboardInterrupt).
                print('runUrls failed for line %r: %s' % (ls, e))
                time.sleep(5)

    def runJob(self):
        """Fetch details for every Job row not yet crawled, in batches of 100.

        Loops until no un-crawled rows remain.  Rows are marked
        ``is_runned = 1`` even when fetching fails, so a permanently broken
        URL cannot stall the loop forever.
        """
        while True:
            jobs = Job.objects.filter(~Q(is_runned='1'))[:100]
            if not jobs:
                break

            for job in jobs:
                try:
                    # Dispatch on the job URL's host.
                    if '51job.com' in job.url:
                        Job51().info(job)
                    elif 'zhaopin.com' in job.url:
                        ZhaoPin().info(job)

                    # Throttle between requests.
                    time.sleep(8)
                except Exception as e:
                    print('runJob failed for %s: %s' % (job.url, e))
                    time.sleep(6)
                job.is_runned = 1
                job.save()

    def runCompany(self):
        """Fetch details for every Company row not yet crawled, in batches of 200.

        Same pattern as :meth:`runJob`: loop until exhausted, mark rows
        processed even on failure so the loop always terminates.
        """
        while True:
            companys = Company.objects.filter(is_runned=0)[:200]
            if not companys:
                break

            for company in companys:
                try:
                    print(company.url)
                    # Dispatch on the company URL's host.
                    if '51job.com' in company.url:
                        Job51().company(company)
                    elif 'zhaopin.com' in company.url:
                        ZhaoPin().company(company)
                    # Throttle between requests.
                    time.sleep(6)
                except Exception as e:
                    print('runCompany failed for %s: %s' % (company.url, e))
                    time.sleep(6)
                company.is_runned = 1
                company.save()

    def handle(self, *args, **options):
        """Management-command entry point: crawl pending job detail pages."""
        self.runJob()

    def read_log(self, file):
        """Return all lines of *file* decoded as UTF-8.

        Uses a context manager so the handle is closed even on error
        (the original leaked the open file object).
        """
        with open(file, 'r', encoding='utf-8') as f:
            return f.readlines()

    def write_log(self, file, content):
        """Append *content* to *file* (UTF-8), closing the handle afterwards."""
        with open(file, 'a', encoding='utf-8') as f:
            f.write(content)
  


    