# coding=utf-8
from bs4 import BeautifulSoup
import time
import re
from  spider.models import  Company,Job
import requests

class URLResponse:
    """Thin wrapper around ``requests`` that fetches a URL with a default
    set of browser-like headers (aimed at sou.zhaopin.com)."""

    # Class-level default headers; replaced per-instance when a non-empty
    # mapping is passed to ``__init__``.
    headers = {
        'Host':"sou.zhaopin.com",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36"
    }

    def __init__(self, url, headers=None):
        """Store ``url``; a truthy ``headers`` mapping overrides the class
        defaults for this instance.

        NOTE: the original signature used a mutable default (``headers={}``)
        — replaced with ``None`` to avoid the shared-mutable-default pitfall;
        the truthiness check keeps behavior identical for ``{}`` callers.
        """
        if headers:
            self.headers = headers
        self.url = url

    def getHtml(self, method='get', data=None):
        """Fetch ``self.url`` and return the ``requests.Response``.

        Only ``method == 'get'`` is implemented; any other value returns
        ``None`` (original behavior, now explicit). ``data`` is accepted but
        currently unused.
        """
        if method == 'get':
            return requests.get(self.url, headers=self.headers, timeout=10)
        return None



class ZhaoPin:
    """Scraper for zhaopin.com: discovers company/job URLs on search result
    pages and fills in job / company detail records via the Django ORM
    (``Company`` / ``Job`` models)."""

    # Company-page URL patterns recognised on a search result page.
    # Compiled once at class-definition time (hoisted out of the per-link
    # loop) and written as raw strings so ``\.`` is a literal dot without a
    # DeprecationWarning. Pattern text is unchanged from the original.
    _COMPANY_URL_RES = [
        re.compile(r"http://company.zhaopin.com/([A-Z]|[a-z]|[0-9]+\.htm)"),
        re.compile(r"http://special.zhaopin.com/2017/wx/"),
        re.compile(r"http://special.zhaopin.com/bj/"),
        re.compile(r"http://special.zhaopin.com/pagepublish/"),
        re.compile(r"http://special.zhaopin.com/tj/"),
        re.compile(r"http://special.zhaopin.com/2016/xa/"),
        re.compile(r"http://xiaoyuan.zhaopin.com/subcompany/"),
    ]

    # Job-page URL patterns, searched inside the same table row as the
    # company link.
    _JOB_URL_RES = [
        re.compile(r"http://jobs.zhaopin.com/([0-9]+\.htm)"),
        re.compile(r"http://special.zhaopin.com/2017/wx/"),
    ]

    # Label -> Job attribute mapping for the key/value rows on a job detail
    # page ("<span>label</span><strong>value</strong>"); order preserved
    # from the original elif chain.
    _JOB_FIELD_LABELS = [
        ('最低学历', 'school'),
        ('招聘人数', 'number'),
        ('发布日期', 'dated'),
        ('工作经验', 'exper'),
        ('职位月薪', 'money'),
        ('工作地点', 'address'),
    ]

    @staticmethod
    def _headers(host):
        """Return the shared browser-like request headers with the given
        ``Host`` value (deduplicates the three copies the original kept)."""
        return {
            'Host': host,
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Connection": "keep-alive",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
        }

    def urls(self, url, d):
        """Extract company and job URLs from the search page at ``url`` and
        persist any not already stored.

        :param url: search-result page URL (fetched with the class default
            sou.zhaopin.com headers).
        :param d: 3-item area sequence, stored on Company as "d0|d1|d2".
        """
        response = URLResponse(url).getHtml()
        # Explicit parser: the original relied on bs4's parser guessing,
        # which warns and varies with what is installed.
        soup = BeautifulSoup(response.text, "html.parser")
        for link in soup.find_all(href=self._COMPANY_URL_RES):
            company_name = self._filter(link.text)
            existing = Company.objects.filter(name=company_name)
            if existing:
                company = existing[0]
            else:
                company = Company.objects.create(**{
                    "url": link['href'],
                    "name": company_name,
                    "email": '',
                    "phone": '',
                    "web": '',
                    "area": '%s|%s|%s' % (d[0], d[1], d[2]),
                })
            # Job links live two levels up from the company anchor (same
            # result-table row).
            for job_link in link.parent.parent.find_all(href=self._JOB_URL_RES):
                href = job_link['href']
                if Job.objects.filter(url=href):
                    print("zhaopin has existed:%s" % href)
                else:
                    job = Job.objects.create(**{
                        "url": href,
                        'company_id': company.id,
                    })
                    print("zhaopin-job: %s" % job.id)

    def _filter(self, strs):
        """Return ``strs`` stripped of surrounding whitespace; '' for any
        falsy input (None, '')."""
        if strs:
            return strs.strip()
        return ""

    def info(self, job):
        """Fetch ``job.url`` and populate the job's detail fields, then save.

        On any scraping error the job is still saved with ``runed = 1`` and
        ``name = 'fail'`` so it is not retried forever.
        """
        print(job.url)
        response = URLResponse(job.url, self._headers("jobs.zhaopin.com")).getHtml()
        soup = BeautifulSoup(response.text, "html.parser")
        try:
            innerbox = soup.find('div', class_="fixed-inner-box")
            job.name = self._filter(innerbox.find('h1').text)
            spans = innerbox.find("div", class_="welfare-tab-box").find_all('span')
            job.liangdian = ",".join(self._filter(s.text) for s in spans)

            # BUG FIX: the original first assigned the whole <li> ResultSet
            # to job.money, leaving a list in the field whenever the
            # '职位月薪' row was absent; that dead assignment is removed.
            items = soup.find('div', class_="terminalpage-left").find('ul').find_all('li')
            for item in items:
                label = item.find('span').text
                for key, attr in self._JOB_FIELD_LABELS:
                    if key in label:
                        setattr(job, attr, self._filter(item.find('strong').text))
                        break

            job.desc = self._filter(soup.find('div', class_="tab-inner-cont").text)
            if not job.address:
                # Fallback: some layouts carry the address in the detail h2.
                job.address = self._filter(soup.find('div', class_="tab-inner-cont").find('h2').text)
            job.runed = 1
            job.save()
            print("end!!")
        except Exception:
            # Narrowed from a bare except so Ctrl-C / SystemExit still
            # propagate; any parse failure marks the job as processed-failed.
            job.runed = 1
            job.name = 'fail'
            job.save()

    def company(self, company):
        """Fetch ``company.url`` and populate the company's detail fields,
        then save.

        On any scraping error ``typs`` is set to 'failed' and the record is
        saved so it is not retried forever.
        """
        print(company.url)
        response = URLResponse(company.url, self._headers("company.zhaopin.com")).getHtml()
        soup = BeautifulSoup(response.text, "html.parser")
        try:
            company.name = soup.find('div', class_='mainLeft').find('h1').text
            for row in soup.find('table', class_="comTinyDes").find_all('tr'):
                text = row.text
                if "公司性质：" in text:
                    company.typs = self._filter(text.replace('公司性质：', ''))
                elif "公司规模：" in text:
                    company.size = self._filter(text.replace('公司规模：', ''))
                elif "公司行业：" in text:
                    company.industry = self._filter(text.replace('公司行业：', ''))
                elif "公司地址：" in text:
                    company.address = self._filter(text.replace('公司地址：', '').replace('查看公司地图', ''))

            company.profile = self._filter(soup.find("div", class_="company-content").text)
            company.save()
        except Exception:
            # Narrowed from a bare except; mark as failed and persist.
            company.typs = 'failed'
            company.save()