

from com.zjs.crawer.urlcontent.baseurlcontent import BaseUrlContent
from com.zjs.zjsqueue import zurlcontentqueue
from bs4 import BeautifulSoup as BS
import codecs
from com.zjs.util.download import request
from queue import Empty
import re,requests
from com.zjs.util.converttolocation import location
from com.zjs.util.write import write_content
import logging
from _ast import Num
import threading

class jobContents(BaseUrlContent):
    """Content crawler for 51job ("job") detail pages.

    Pulls posting URLs from the shared content queue, fetches each page,
    scrapes the fields of interest (title, company, location, salary,
    company profile, requirements, address) into a dict, and hands the
    dict to ``write_content`` for persistence.
    """

    name = "job"

    # Translation table that deletes the whitespace-ish noise found in the
    # "公司性质/规模/产业" fragments (CR, LF, space, tab, NBSP).
    _WS_TABLE = str.maketrans("", "", "\r\n \t\xa0")

    def __init__(self):
        print("[content][" + self.name + "]:start!")

    def run(self):
        """Consume URLs from the queue forever, 10 fetch threads per batch."""
        while True:
            # BUGFIX: pre-bind url so the except block can log it even when
            # zurlcontentqueue.get() raises before the first assignment
            # (the original code hit a NameError in that case).
            url = None
            try:
                threads = []
                for _ in range(10):
                    url = zurlcontentqueue.get(self.name)
                    logging.debug("[content][" + self.name + "]:" + url)
                    threads.append(
                        threading.Thread(target=self.get_content_, args=(url,)))

                for t in threads:
                    t.start()
                for t in threads:
                    t.join()

            except Exception as ex:
                logging.error("[content][" + self.name + "]:" + str(url) + "解析失败！")
                # logging.exception already records the traceback; the
                # redundant exc_info=1 of the original is dropped.
                logging.exception(ex)

    @staticmethod
    def _clean(text):
        """Remove CR/LF/space/tab/NBSP characters from *text* in one pass."""
        return text.translate(jobContents._WS_TABLE)

    @staticmethod
    def _parse_salary(salary):
        """Parse a 51job salary string such as ``"4.5-6千/月"``.

        Returns a ``(lowest, highest)`` monthly-salary pair in yuan
        (yearly figures are divided by 12 and rounded to 2 decimals),
        or ``None`` when the text is not a parsable numeric range
        (e.g. "面议") or the period marker is unrecognised.
        """
        period = salary[-1]                      # "年" (year) or "月" (month)
        body = salary.strip(period).strip("/")   # e.g. "4.5-6千"
        unit = body[-1]                          # "万" or "千"
        multiplier = 10000 if unit == "万" else 1000
        try:
            low, high = (float(part) for part in body.strip(unit).split("-"))
        except ValueError:
            # ROBUSTNESS: the original crashed the whole record here when
            # the text was not a "low-high" range; now the salary fields
            # are simply omitted.
            return None
        low *= multiplier
        high *= multiplier
        if period == "年":
            # Convert yearly salary to monthly.
            return (round(low / 12, 2), round(high / 12, 2))
        if period == "月":
            return (low, high)
        return None

    def get_content_(self, url):
        """Fetch *url*, scrape one job posting and persist the result.

        Returns silently when the page lacks the expected ``div.cn``
        container (e.g. a removed or expired posting).
        """
        html = request.get(url, 3)
        # Server declares ISO-8859-1, but pages are actually GB-encoded.
        html.encoding = "gb2312"
        soup = BS(html.text, "lxml")

        topdiv = soup.find("div", attrs={"class": "cn"})
        if topdiv is None:
            return

        result = {}
        result['职位'] = topdiv.find('h1', attrs={'title': True}).attrs['title']

        comdiv = topdiv.find("p", attrs={"class": "cname"})
        result['公司'] = comdiv.find('a', attrs={'title': True}).attrs['title']

        # Location text is either "市区" or "市区-区域".
        # (renamed from "location"/"locals", which shadowed the imported
        # helper and the builtin)
        loc_text = topdiv.find("span", attrs={"class": "lname"}).text
        if "-" in loc_text:
            loc_parts = loc_text.split("-")
            result['市区'] = loc_parts[0]
            result['区域'] = loc_parts[1]
        else:
            result['市区'] = loc_text

        salary_range = self._parse_salary(topdiv.find("strong").text)
        if salary_range is not None:
            result["最低月薪"], result["最高月薪"] = salary_range

        com_msg = topdiv.find("p", attrs={"class": "msg ltype"}).text
        msgs = com_msg.split("|")
        result["公司性质"] = self._clean(msgs[0])
        result["公司规模"] = self._clean(msgs[1])
        if len(msgs) == 3:
            result["公司产业"] = self._clean(msgs[2])

        topdiv2 = soup.find("div", attrs={"class": "tCompany_main"})
        ginfo = topdiv2.find_all("span", attrs={"class": "sp4"})

        # Default every requirement field, then overwrite the ones present
        # on the page (the original tried to do this inside the chain below
        # but its logic made that unreachable).
        for key in ("工作经验", "学历", "人数", "发布时间", "英语水平"):
            result[key] = ""

        # The value sits between the closing </em> of the icon and </span>.
        # NOTE(review): "/s*" was probably meant as r"\s*", but it happens
        # to match the "/em>" of the closing tag, so the original pattern
        # is kept for identical extraction behaviour.
        reg = re.compile(r'/s*em>(.*)</span>')
        for span in ginfo:
            snippet = str(span)
            found = re.findall(reg, snippet)
            value = found[0] if found else ""
            # BUGFIX: the original if/elif chain tested "i1" twice first,
            # which made every i2-i5 branch unreachable, and compared the
            # findall LIST (not its first element) against "若干人".
            if snippet.find("i1") != -1:      # experience
                result["工作经验"] = value
            elif snippet.find("i2") != -1:    # education
                result["学历"] = value
            elif snippet.find("i3") != -1:    # head count
                if value == "若干人":
                    result["人数"] = value
                else:
                    result["人数"] = value.strip("人").strip("招聘")
            elif snippet.find("i4") != -1:    # publish date
                result["发布时间"] = value
            elif snippet.find("i5") != -1:    # English level
                result["英语水平"] = value

        topdiv3 = soup.find("div", attrs={"class": "bmsg inbox"})
        add = topdiv3.find("p", attrs={"class": "fp"})

        # Address is the text between the label's </span> and the </p>.
        reg3 = re.compile(r'.*</span>(.*)\s*</p>')
        address = re.findall(reg3, str(add))
        result["地址"] = str(address[0]).replace("\t", "")

        print("[content][" + self.name + "]:" + str(result))
        write_content(self.name, result)