import re
import sys
import time

from bs4 import BeautifulSoup

# Make the sibling kernel/model directories importable before loading them.
sys.path.append('../kernel')
sys.path.append('../model')

import spider_main
import city_model
import task_model
import city_xcurl_1_model  # was missing: used by City_hotelController but never imported
import city_xcurl_2_model
import city_xcurl_3_model
class City_hotelController(object):
    """Crawl Ctrip (m.ctrip.com) hotel sitemap pages layer by layer.

    Each ``city_hotel_xcurl_N`` method downloads one page of layer N-1,
    extracts the layer-N links, deduplicates and persists them through the
    corresponding model object, and advances a cursor in the task table.
    """

    def __init__(self):
        # Instantiate the core spider object (provides the downloader).
        self.spider = spider_main.SpiderMain()

    def city_hotel_xcurl_1(self):
        """Collect layer-1 hotel URLs from the domestic sitemap page.

        Each link's title is split to extract a city name, which is resolved
        to a city id; titles with no matching city are appended to
        ``city_is_null.txt`` for manual review and skipped.
        """
        # url = 'http://m.ctrip.com/html5/hotel/sitemap-domestic'
        url = 'http://m.ctrip.com/html5/hotel/sitemap-domestic/6'  # pages 2-6
        # Download and parse the sitemap page.
        html_cont = self.spider.downloader.downloader(url)
        html_obj = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        a_data = html_obj.find_all('a')
        c_model = city_model.CityModel()
        x1_model = city_xcurl_1_model.Xcurl_1()  # layer-1 Ctrip hotel URLs
        for a_tag in a_data:
            # Only collect <a> links that carry no class attribute.
            if 'class' in str(a_tag):
                continue
            # City name is the part of the title before 酒 / ( / 县 / 市.
            names = re.split(r'[酒(县市]', a_tag['title'])
            city_id_data = c_model.get_city_id_by_name(names[0])
            if not city_id_data:
                # Record unresolvable city titles for later inspection.
                with open('city_is_null.txt', 'a') as file_object:
                    file_object.write(a_tag['title'] + "\n")
                print(a_tag['title'])
                continue
            city_id = city_id_data[0]['id']
            x1_model.add_city_hotel_url(a_tag['href'], city_id)
            print(a_tag['href'])

    def city_hotel_xcurl_2(self):
        """Expand one pending layer-1 URL into layer-2 URLs.

        Reads the layer-1 cursor from the task table, downloads that page,
        stores each new ``line2items`` link as a layer-2 URL, then advances
        the cursor. Terminates the process when no layer-1 URL is pending.
        """
        url_hosts = 'http://m.ctrip.com'
        t_model = task_model.TaskModel()
        x1_model = city_xcurl_1_model.Xcurl_1()
        x2_model = city_xcurl_2_model.Xcurl_2()

        # Task record whose 'sign' field points at the next layer-1 URL row.
        xcurl_1_task = t_model.get_xcurl_1()
        xcurl_1_data = x1_model.get_xcurl(int(xcurl_1_task['sign']))

        if xcurl_1_data is None:
            sys.exit()  # nothing left to crawl at this layer

        cid = int(xcurl_1_data['cid'])
        xcurl_1_id = int(xcurl_1_data['id'])  # id of the layer-1 URL row
        url = url_hosts + xcurl_1_data['url']

        html_cont = self.spider.downloader.downloader(url)
        html_obj = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        a_data = html_obj.find_all('a', class_='line2items')

        for a_tag in a_data:
            xcurl = a_tag['href']
            # Skip URLs that were already stored (dedup).
            if x2_model.get_xcurl_by_url(xcurl):
                continue
            x2_model.add_city_hotel_url(xcurl, cid, xcurl_1_id)
            print(xcurl)

        # Advance the task cursor and record the crawl timestamp.
        cur_time = int(time.time())
        t_model.save_city_xcurl_1(xcurl_1_id, cur_time)

    def city_hotel_xcurl_3(self):
        """Expand one pending layer-2 URL into layer-3 URLs.

        Mirrors :meth:`city_hotel_xcurl_2` one level deeper, using the
        generic ``get_sign_by_id`` / ``save_sign_by_id`` task API.
        """
        url_hosts = 'http://m.ctrip.com'
        t_model = task_model.TaskModel()
        x2_model = city_xcurl_2_model.Xcurl_2()
        x3_model = city_xcurl_3_model.Xcurl_3()

        # Task record whose 'sign' field points at the next layer-2 URL row.
        xcurl_2_task = t_model.get_sign_by_id('city_hotel_xcurl_2')
        xcurl_2_data = x2_model.get_xcurl_by_id(int(xcurl_2_task['sign']))

        if xcurl_2_data is None:
            sys.exit()  # nothing left to crawl at this layer

        cid = int(xcurl_2_data['cid'])
        xcurl_2_id = int(xcurl_2_data['id'])  # id of the layer-2 URL row
        url = url_hosts + xcurl_2_data['url']

        html_cont = self.spider.downloader.downloader(url)
        html_obj = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        a_data = html_obj.find_all('a', class_='line2items')

        for a_tag in a_data:
            xcurl = a_tag['href']
            # Skip URLs that were already stored (dedup).
            if x3_model.get_xcurl_by_url(xcurl):
                continue
            x3_model.add_city_hotel_url(xcurl, cid, xcurl_2_id)
            print(xcurl)

        # Advance the task cursor and record the crawl timestamp.
        cur_time = int(time.time())
        t_model.save_sign_by_id(xcurl_2_id, cur_time, 'city_hotel_xcurl_2')

if __name__ == '__main__':
    # Run the layer-3 crawler ten times, pausing one second between rounds.
    controller = City_hotelController()
    for round_no in range(1, 11):
        controller.city_hotel_xcurl_3()
        if round_no == 10:
            exit()
        time.sleep(1)  # pause 1 second

