# coding=utf-8
from bs4 import BeautifulSoup
import time
import re
from  spider.models import  Company,Job
import requests

class URLResponse:
    """Thin wrapper around `requests` that fetches a URL with 51job-specific headers."""

    # Headers mimic a desktop Chrome browser hitting search.51job.com.
    headers = {
        'Host': "search.51job.com",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Origin": "http://search.51job.com",
        "Cache-Control": "max-age=0",
        "Upgrade-Insecure-Requests": "1",
    }

    def __init__(self, url):
        # Target URL to fetch.
        self.url = url

    def getHtml(self, method='get', data=None):
        """
        Fetch ``self.url`` and return the ``requests.Response``.

        Args:
            method: 'get' or 'post'.
            data: form payload for POST requests. Defaults to None (empty
                payload) instead of a mutable ``{}`` default, which would be
                shared across every call of this method.

        Returns:
            The ``requests.Response`` object.

        Raises:
            ValueError: for an unsupported ``method`` (the original silently
                returned None in that case, hiding caller typos).
        """
        if method == 'get':
            return requests.get(self.url, headers=self.headers, timeout=10)
        if method == 'post':
            return requests.post(self.url, headers=self.headers,
                                 data=data or {}, timeout=10)
        raise ValueError("unsupported HTTP method: %r" % method)



class Job51:
    """Scraper for 51job.com: search results, job-detail pages and company pages."""

    def urls(self, url, d):
        """
        Extract the company URL and its job URLs from a 51job search-result
        page and persist them via the Company/Job models.

        Args:
            url: search-result page URL for a single company query.
            d: pre-collected company fields — d[0], d[1]: area parts,
               d[2]: company name, d[3]: phone, d[4]: email, d[5]: website.

        Returns:
            "" when the page reports no results; otherwise None.
        """
        response = URLResponse(url).getHtml()
        # 51job serves GBK-encoded pages; decode explicitly before parsing.
        # The parser is pinned so behaviour does not depend on which optional
        # parser (lxml, html5lib, ...) happens to be installed.
        soup = BeautifulSoup(response.content.decode('gbk'), 'html.parser')

        # "No results" page: record the company with the data we already have.
        if soup.find('p', class_='dw_nomsg'):
            Company.objects.create(**{
                "url": '',
                "name": d[2],
                "email": d[4],
                "phone": d[3],
                "web": d[5],
                "area": '%s|%s' % (d[0], d[1]),
            })
            return ""

        # Anchors whose href matches a company page URL.
        company_url_res = [
            re.compile(r"https://jobs.51job.com/all/co([0-9]+\.html)"),
        ]
        company_links = soup.find_all(href=company_url_res)
        if not company_links:
            return

        # The original loop broke after the first iteration, so only the
        # first matching company anchor is ever used.
        link = company_links[0]
        companys = Company.objects.filter(name=d[2])
        if not companys:
            # NOTE(review): the record is looked up by the search name d[2]
            # but created with the anchor text — confirm this mismatch is
            # intended before relying on de-duplication here.
            cInstance = Company.objects.create(**{
                "url": link['href'],
                "name": link.text,
                "email": '',
                "phone": '',
                "web": '',
                "area": '%s|%s' % (d[0], d[1]),
            })
            print({
                "url": link['href'],
                "name": link.text,
                "email": '',
                "phone": '',
                "web": '',
                "area": '%s|%s' % (d[0], d[1]),
            })
        else:
            cInstance = companys[0]

        # Anchors whose href matches a job-detail page URL.
        job_url_res = [
            re.compile(r"https://jobs.51job.com/([a-z]|[\-])+/([0-9]+\.html)"),
        ]
        for job_link in soup.find_all(href=job_url_res):
            if Job.objects.filter(url=job_link['href']):
                print("51jobs has existed:%s" % job_link['href'])
                continue
            jInstance = Job.objects.create(**{
                "url": job_link['href'],
                "name": job_link.text.strip(),
                'company_id': cInstance.id,
            })
            print("51jobs: %s" % jInstance.id)

    def _filter(self, strs):
        """Return *strs* stripped of surrounding whitespace, or "" if falsy."""
        if strs:
            return strs.strip()
        return ""

    def info(self, job):
        """
        Scrape a job-detail page and fill in the Job model instance.

        Always sets job.runed = 1 and saves; on parse failure the name is set
        to a 'fail'/'failed' marker so the row is not retried forever.

        Args:
            job: Job model instance with at least a ``url`` attribute.
        """
        print(job.url)
        response = URLResponse(job.url).getHtml()
        # Pre-bind so the highlights section below cannot hit an unbound
        # name if the tag-icon section failed before assigning it.
        jtags = None
        try:
            soup = BeautifulSoup(response.content.decode('gbk'), 'html.parser')

            # Header block: job title (h1) and salary (strong).
            try:
                tHeader = soup.find('div', class_="tHeader")
                job.name = self._filter(tHeader.find('h1').text)
                job.money = tHeader.find('strong').text
            except Exception:
                # Narrowed from a bare except so Ctrl-C still interrupts.
                job.name = 'fail'

            # Tag icons: the <em> class encodes which field the span holds.
            try:
                jtags = soup.find('div', class_="jtag inbox")
                for sp in jtags.find_all('span', class_="sp4"):
                    em_classes = sp.find('em')['class']
                    if 'i2' in em_classes:
                        job.school = self._filter(sp.text)   # education level
                    elif 'i3' in em_classes:
                        job.number = self._filter(sp.text)   # headcount
                    elif 'i4' in em_classes:
                        job.dated = self._filter(sp.text)    # publish date
                    elif 'i1' in em_classes:
                        job.exper = self._filter(sp.text)    # experience
            except Exception:
                pass

            # Job highlights ("liangdian") listed in the t2 paragraph.
            try:
                spans = jtags.find("p", class_="t2").find_all('span')
                job.liangdian = ",".join(self._filter(s.text) for s in spans)
            except Exception:
                pass

            # Full job description.
            try:
                job.desc = self._filter(
                    soup.find('div', class_="bmsg job_msg inbox").text)
            except Exception:
                job.desc = ''

            # Work address; the Chinese label prefix is stripped off.
            try:
                job.address = self._filter(
                    soup.find('div', class_="bmsg inbox")
                        .find('p').text.replace("上班地址：", ""))
            except Exception:
                job.address = ''

            job.runed = 1
            job.save()
        except Exception:
            # Decode or whole-page parse failure: mark the row and move on.
            job.name = 'failed'
            job.runed = 1
            job.save()

    def company(self, company):
        """
        Scrape a company-detail page and fill in the Company model instance.

        On any failure the record is marked with typs = 'failed' and saved so
        it is not retried forever.

        Args:
            company: Company model instance with at least a ``url`` attribute.
        """
        print(company.url)
        try:
            response = URLResponse(company.url).getHtml()
            soup = BeautifulSoup(response.content.decode('gbk'), 'html.parser')

            # Header line holds "type | size | industry" joined by '|'.
            ltypes = soup.find('div', class_="tHeader tHCop") \
                         .find('p', class_="ltype").text
            cs = ltypes.split('|')
            company.industry = self._filter(cs[2])
            company.size = self._filter(cs[1])
            company.typs = self._filter(cs[0])

            # Company profile text.
            company.profile = self._filter(soup.find("div", class_="con_msg").text)

            # Company address; the Chinese label prefix is stripped off.
            company.address = self._filter(
                soup.find('div', class_='tBorderTop_box bmsg')
                    .find('p', class_='fp').text.replace('公司地址：', ''))
            company.save()
        except Exception:
            # Narrowed from a bare except so Ctrl-C still interrupts.
            company.typs = 'failed'
            company.save()


