from bs4 import BeautifulSoup
import sys
# sys.path.append('../kernel')
# sys.path.append('../model')
sys.path.append('/home/python/caigou/kernel')
sys.path.append('/home/python/caigou/model')
import urllib.parse
import cg_task
import cg_info
import urllib.request
import ssl
import spider_main
import time
class QingdaoController(object):
    """Crawler controller for the Qingdao procurement list page (task id 936).

    Loads the page URL from the task table, parses the anchor list, and
    inserts every link not already stored via ``cg_info.InfoModel``.
    Crawling stops early once three already-stored links are seen on the
    page, on the assumption that the remainder was covered by a prior run.
    """
    def __init__(self):
        # Number of already-stored (duplicate) links seen during a run.
        self.exists_count = 0
        # Instantiate the core spider object (kept for parity with sibling
        # controllers; not referenced directly in get_info below).
        self.spider = spider_main.SpiderMain()

    # Fetch the list page and persist new entries.
    def get_info(self):
        """Download the Qingdao list page and store links not yet in the DB.

        Side effects: inserts rows through ``cg_info.InfoModel.add_info``
        and prints each newly stored URL.
        """
        task_id = 936  # Qingdao task id (renamed from `id` to avoid shadowing the builtin)
        # Reset per call so a reused controller instance does not inherit
        # the duplicate count from a previous run (original never reset it).
        self.exists_count = 0

        t_mode = cg_task.TaskModel()
        task_row = t_mode.get_task_by_id(task_id)
        url = task_row['web_url']

        host_name = urllib.parse.urlparse(url).hostname
        cur_time = int(time.time())
        i_model = cg_info.InfoModel()

        # Work around HTTPS certificate errors.
        # NOTE(review): this disables certificate verification process-wide;
        # prefer passing context=ssl._create_unverified_context() to urlopen.
        ssl._create_default_https_context = ssl._create_unverified_context

        # Use a context manager so the HTTP response is always closed
        # (the original leaked the response object).
        with urllib.request.urlopen(url) as resp:
            data = resp.read().decode('gbk')  # site serves GBK-encoded HTML

        html_obj = BeautifulSoup(data, 'html.parser')
        a_data = html_obj.find_all('a', attrs={"name": "lenn41"})
        for a_tag in a_data:
            # NOTE(review): hostname + path yields a scheme-less URL
            # (e.g. "example.com/x.html"); kept as-is because existing DB
            # rows and the exists() dedup check use this exact format.
            # urllib.parse.urljoin(url, href) would be the correct form.
            xcurl = host_name + a_tag['href']
            xtitle = a_tag['title']
            if i_model.exists(xcurl):
                # Count duplicates; after three on one page, assume the
                # page was already crawled and stop.
                self.exists_count += 1
                if self.exists_count >= 3:
                    print('2页面已经爬取过，停止程序！')
                    break
                continue
            i_model.add_info(xtitle, task_id, xcurl, cur_time)
            print(xcurl)
# if __name__=='__main__':
#     obj=QingdaoController()
#     obj.get_info()