from com.zjs.crawer.urlcontent.baseurlcontent import BaseUrlContent
from com.zjs.zjsqueue import zurlpathqueue
from bs4 import BeautifulSoup as BS
from com.zjs.util.download import request
from com.zjs.zjsqueue import zurlcontentqueue
from queue import Empty
import logging

class LianJiaPath(BaseUrlContent):
    """Crawler worker for LianJia listing pages.

    Pulls listing-page URLs off the "lianjia" path queue, extracts the
    per-listing detail links from each page, and pushes them onto the
    "lianjia" content queue.
    """

    def __init__(self):
        # NOTE(review): super().__init__() is deliberately not called here,
        # matching the original code — confirm BaseUrlContent needs no
        # initialization of its own.
        logging.debug("[path][lianjia]:start!")

    def run(self):
        """Consume URLs from the path queue forever.

        Each URL is handed to dojob(); any failure is logged and the loop
        continues so one bad page cannot kill the worker thread.
        """
        while True:
            # Bind the name up front so the except handler below can always
            # reference it, even when get() itself is what raised.
            url = None
            try:
                url = zurlpathqueue.get("lianjia")
                self.dojob(url)
            except Empty:
                # Queue momentarily empty (Empty was imported but previously
                # unhandled): not a parse failure, just retry.
                continue
            except Exception as ex:
                # Lazy %-args: the message is only built if DEBUG is enabled.
                logging.debug("[path][lianjia]:%s解析失败", url)
                logging.error(ex)

    def dojob(self, url_b):
        """Fetch one listing page and enqueue every detail-page link.

        Detail links are the href of <a> tags inside div.pic-panel elements.

        :param url_b: listing-page URL to fetch (3 is the retry/timeout arg
                      expected by the project's request helper — TODO confirm)
        """
        html = request.get(url_b, 3)
        soup = BS(html.text, "lxml")
        try:
            divs_ = soup.find_all("div", attrs={"class": "pic-panel"})
            for div_ in divs_:
                # find_all("a", href=True) yields only anchor tags carrying an
                # href. The old div_.children iteration also yielded whitespace
                # text nodes, whose ['href'] lookup raised and — through the
                # former bare "except: pass" — silently skipped every
                # remaining div on the page.
                for a_ in div_.find_all("a", href=True):
                    zurlcontentqueue.put("lianjia", a_["href"])
        except Exception:
            # Best-effort parse: a malformed page is skipped, not fatal,
            # but the failure is now recorded instead of swallowed silently.
            logging.exception("[path][lianjia]: parse failed for %s", url_b)