# coding=utf-8
from bs4 import BeautifulSoup
import time
import re
from  spider.models import  Company,Job
import requests

class URLResponse:
    """Thin wrapper around ``requests`` that fetches a URL with fixed
    browser-like headers for www.buildhr.com."""

    # Shared default headers; class-level on purpose (read-only use).
    headers = {
        'Host':"www.buildhr.com",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36",
        "Content-Type" : "application/x-www-form-urlencoded",
        "Upgrade-Insecure-Requests" : "1"
    }

    def __init__(self, url):
        # URL to fetch; stored for later getHtml() calls.
        self.url = url

    def getHtml(self, method='get', data=None):
        """Fetch ``self.url`` and return the ``requests.Response``.

        Args:
            method: only ``'get'`` is supported.
            data: reserved for future POST support (unused for GET).
                Defaults to ``None`` instead of a shared mutable ``{}``.

        Raises:
            ValueError: for unsupported methods (the original silently
                returned ``None``, which crashed callers later).
        """
        if data is None:
            data = {}
        if method == 'get':
            return requests.get(self.url, headers=self.headers, timeout=10)
        raise ValueError("unsupported HTTP method: %r" % method)



class Buildhr:
    """Scraper for buildhr.com: collects company and job listing data
    and persists it via the Django ``Company``/``Job`` models.

    NOTE: ``soup`` and ``url`` are class-level attributes, so state is
    effectively shared across instances; every fetch method reassigns
    them.
    """
    soup = None  # BeautifulSoup of the most recently fetched page
    url = ''     # URL of the most recently fetched page

    def _strip_label(self, text, label):
        """Strip whitespace and remove a leading *label* from *text*.

        Replaces the original ``str.strip(label)`` calls: ``strip``
        treats its argument as a character *set* and can clip matching
        characters off the value itself, not just remove the label.
        """
        text = text.strip()
        if text.startswith(label):
            return text[len(label):].strip()
        return text

    def initSoap(self, url):
        """Fetch *url* and initialise ``self.soup`` from the raw bytes."""
        response = URLResponse(url).getHtml()
        # Explicit parser avoids bs4's "no parser specified" warning and
        # keeps parsing consistent across environments.
        self.soup = BeautifulSoup(response.content, 'html.parser')
        self.url = url
        return self.soup

    def urls(self, url, d):
        """Crawl one search-result page, saving new companies and jobs.

        Args:
            url: the search-result page to fetch.
            d: sequence where ``d[0]``/``d[1]`` form the area string and
               ``d[2]`` is the company name being searched for.

        Returns:
            An empty list — kept for backward compatibility (the
            original never appended to it either).
        """
        response = URLResponse(url).getHtml()
        # The site serves GBK-encoded pages.
        self.soup = BeautifulSoup(response.content.decode('gbk'), 'html.parser')

        company_href = [re.compile(r"/company/([a-zA-Z0-9]+)/")]
        anchors = self.soup.find('div', class_="search_list").find_all(href=company_href)
        urls = []
        for anchor in anchors:
            company_name = self._filter(anchor.text)
            # NOTE(review): ``break`` aborts the whole page at the first
            # non-matching anchor; ``continue`` may have been intended.
            # Preserved as-is to avoid an unverified behaviour change.
            if d[2] not in company_name:
                break

            companys = Company.objects.filter(name=d[2])
            if not companys:
                cInstance = Company.objects.create(**{
                    "url": "http://www.buildhr.com/%s" % anchor['href'],
                    "name": d[2],
                    "email": '',
                    "phone": '',
                    "web": '',
                    "area": '%s|%s' % (d[0], d[1]),
                })
            else:
                cInstance = companys[0]

            job_href = [re.compile(r"/job/([a-zA-Z0-9]+)/")]
            # Job links live in an ancestor container of the company anchor.
            jobs = anchor.parent.parent.parent.find_all(href=job_href)
            for j in jobs:
                if "更详细" in j.text:
                    continue
                # Bug fix: the original filtered on the bare href while
                # records are created with the full URL, so the duplicate
                # check never matched and duplicates were inserted.
                job_url = "http://www.buildhr.com/%s" % j['href']
                if not Job.objects.filter(url=job_url):
                    jInstance = Job.objects.create(**{
                        "url": job_url,
                        'company_id': cInstance.id,
                    })
                    print("buildhr-job: %s" % jInstance.id)
                else:
                    print("buildhr has existed:%s" % j['href'])

        return urls

    def getJobsUrls(self):
        """Extract job links from the current ``self.soup``.

        Returns:
            List of dicts with ``job_url`` and ``job_name`` keys.
        """
        job_href = [re.compile(r"/job/([a-zA-Z0-9]+)/")]
        anchors = self.soup.find('div', class_="search_list").find_all(href=job_href)
        urls = []
        for anchor in anchors:
            # "更详细" ("more detail") anchors are navigation, not jobs.
            if "更详细" in anchor.text:
                continue
            urls.append({
                'job_url': self._filter(anchor['href']),
                'job_name': self._filter(anchor.text),
            })
        return urls

    def getCompanyUrls(self, name=None):
        """Extract company links from the current ``self.soup``.

        Args:
            name: unused; kept for backward compatibility.

        Returns:
            List of dicts with ``company_url`` and ``company_name`` keys.
        """
        company_href = [re.compile(r"/company/([a-zA-Z0-9]+)/")]
        anchors = self.soup.find('div', class_="search_list").find_all(href=company_href)
        urls = []
        for anchor in anchors:
            urls.append({
                'company_url': self._filter(anchor['href']),
                'company_name': self._filter(anchor.text),
            })
        return urls

    def getCompanyInfo(self, company=None):
        """Fetch a company's detail page and update its DB record.

        Bug fix: the original ended with ``return urls`` where ``urls``
        was never defined, so every call raised NameError after saving.
        It now returns the updated *company*.
        """
        response = URLResponse(company.url).getHtml()
        self.soup = BeautifulSoup(response.content.decode('gbk'), 'html.parser')
        try:
            info_box = self.soup.find('div', class_="com_info")
            for item in info_box.find('ul', class_='company_info').find_all('li'):
                if '公司性质' in item.text:  # company ownership type
                    company.typs = self._strip_label(item.text, '公司性质：')
                elif '公司规模' in item.text:  # company size
                    company.size = self._strip_label(item.text, '公司规模：')

            for block in info_box.find_all('div', class_='company_contact company_contact_mb u_whsn'):
                if '公司简介' in block.text:  # company profile
                    company.profile = self._strip_label(block.text, '公司简介')

            company.save()
        except Exception as exc:
            # Narrowed from a bare except; mark the record so failed
            # pages are visible rather than silently swallowed.
            company.typs = 'failed'
            company.save()
            print("buildhr company parse failed: %s (%s)" % (company.url, exc))
        return company

    def getJobInfo(self, job=None):
        """Fetch a job's detail page and fill in the Job record fields.

        Always marks the job as processed (``runed = 1``), even when
        parsing fails, so the crawler does not retry it forever.
        """
        print(job.url)
        response = URLResponse(job.url).getHtml()
        self.soup = BeautifulSoup(response.content.decode('gbk'), 'html.parser')
        try:
            jobObj = self.soup.find('div', class_="wrap_lt_job")
            job.name = self._filter(jobObj.find('h1').text)
            # "更新日期：" = "updated on"
            job.dated = self._strip_label(
                self._filter(self.soup.find('div', class_='wrap_title_rt').text),
                '更新日期：')

            for companyinfo in jobObj.find('ul', class_='company_info').find_all('li'):
                if '工作地点：' in companyinfo.text:  # work location
                    job.address = self._strip_label(companyinfo.text, '工作地点：')

            for tag in jobObj.find('ul', class_="job_info").find_all('li'):
                if '学历要求：' in tag.text:      # education requirement
                    job.school = self._strip_label(tag.text, '学历要求：')
                elif '招聘人数：' in tag.text:    # number of openings
                    job.number = self._strip_label(tag.text, '招聘人数：')
                elif '工作经验：' in tag.text:    # work experience
                    job.exper = self._strip_label(tag.text, '工作经验：')
                elif '工资待遇：' in tag.text:    # salary
                    job.money = self._strip_label(tag.text, '工资待遇：')

            job.desc = jobObj.find('dl', class_='zxd_jobinfo').text

            job.runed = 1
            job.save()
        except Exception as exc:
            # Narrowed from a bare except; still mark as processed so the
            # crawler moves on, but report what went wrong.
            job.runed = 1
            job.save()
            print("buildhr job parse failed: %s (%s)" % (job.url, exc))

    def _filter(self, strs):
        """Return *strs* stripped of surrounding whitespace; '' for falsy."""
        if strs:
            return strs.strip()
        return ""
    
    

