from com.zjs.crawer.urlcontent.baseurlcontent import BaseUrlContent
from com.zjs.zjsqueue import zurlcontentqueue
from bs4 import BeautifulSoup as BS
import codecs
from com.zjs.util.download import request
from queue import Empty
import re,requests
from com.zjs.util.converttolocation import location
from com.zjs.util.write import write_content
import logging

class ZhiLianContent(BaseUrlContent):
    """Crawler worker that parses Zhilian (智联招聘) job-detail pages.

    URLs are pulled from the shared content queue (keyed by ``name``); each
    page is fetched, parsed with BeautifulSoup, and the extracted record is
    persisted via ``write_content``. All failures are logged and swallowed so
    the consume loop never dies.
    """

    # Source identifier: used as the queue key, the log prefix, and the
    # output destination name.
    name = "zhilian"

    def __init__(self):
        logging.debug("[content]["+self.name+"]:start!")

    def run(self):
        """Consume URLs from the content queue forever.

        Each URL is handed to :meth:`get_content_`; any exception (queue or
        parse) is logged and the loop continues with the next URL.
        """
        while True:
            # Initialize before the try so the except branch can reference
            # it even when zurlcontentqueue.get itself raises (previously a
            # NameError risk on the very first failure).
            url = None
            try:
                url = zurlcontentqueue.get(self.name)
                logging.debug("[content]["+self.name+"]:"+url)
                self.get_content_(url)
            except Exception:
                # logging.exception implies exc_info; pass the message, not
                # the exception object (the old call misused the API).
                logging.exception("[content]["+self.name+"]:"+str(url)+"解析失败！")

    def get_content_(self, url):
        """Download one Zhilian job-detail page and extract its fields.

        Parameters
        ----------
        url : str
            Absolute URL of a job-detail page.

        Side effects: writes the parsed record via ``write_content``.
        Parse failures are logged (with the resolved request path for
        debugging redirects) and never re-raised.
        """
        baseurl = ""
        try:
            result = {}
            html = request.get(url, 3)
            baseurl = html.request.path_url
            soup = BS(html.text, "lxml")

            # --- job title and welfare tags -----------------------------
            topdiv = soup.find("div", attrs={"class": "inner-left fl"})
            result['职位名称'] = topdiv.find("h1").text
            tagsdiv = topdiv.find("div", attrs={"class": "welfare-tab-box"})
            result['标签'] = ",".join(span.text for span in tagsdiv.find_all("span"))

            # --- basic job info list ("key：value" items) ----------------
            baseinfos = soup.find("ul", attrs={"class": "terminal-ul clearfix"})
            for li in baseinfos.find_all("li"):
                contents = li.text.split("：")
                if len(contents) == 2:
                    if contents[0] == "职位月薪":
                        amount = contents[1].split("元/月")[0]
                        result['职位月薪金额'] = amount
                        # Salary may be a single value (e.g. "面议") with no
                        # range dash; [-1] degrades gracefully where the old
                        # split("-")[1] raised IndexError and lost the record.
                        parts = amount.split("-")
                        result['职位月薪最高'] = parts[-1]
                        result['职位月薪最低'] = parts[0]
                    if contents[0] == "招聘人数":
                        result['招聘人数个数'] = contents[1].split("人")[0]
                    result[contents[0]] = contents[1]

            # --- company name / link ------------------------------------
            companya = soup.find("p", attrs={"class": "company-name-t"}).find("a")
            result['公司名称'] = companya.text
            result['公司链接'] = companya['href']

            # --- company info list --------------------------------------
            baseinfos = soup.find("ul", attrs={"class": "terminal-ul clearfix terminal-company mt20"})
            for li in baseinfos.find_all("li"):
                contents = li.text.split("：")
                if len(contents) == 2:
                    if contents[0] == "公司地址":
                        result['公司地址'] = contents[1].replace("查看公司地图", "")
                    else:
                        result[contents[0].replace("\n", "")] = contents[1].replace("\n", "").strip()

            # --- geocoding ----------------------------------------------
            # Guard the lookup: either field can be absent on some pages;
            # the old unconditional result['公司地址'] raised KeyError and
            # discarded the entire already-parsed record.
            addr = result.get('公司地址')
            workplace = result.get('工作地点')
            if addr and workplace:
                l = location.getLocation(addr, workplace.split("-")[0])
                if l != {}:
                    result['lng'] = l['location']['lng']
                    result['lat'] = l['location']['lat']
                    result['lcity'] = l['city']
                    result['ldistrict'] = l['district']

            logging.debug("[content]["+self.name+"]:"+str(result))
            write_content(self.name, result)
        except Exception:
            logging.exception("[content]["+self.name+"]:"+str(url)+"解析失败！" + " [baseurl]:" + baseurl)
        
