from webCrawler import webCrawler
import time
from bs4 import BeautifulSoup

from selenium.webdriver import ActionChains
import selenium.webdriver.support.ui as ui
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
import selenium.webdriver.support.expected_conditions as EC


class Ctrip(webCrawler):
    """Crawler for Ctrip vacation routes departing from Shanghai.

    Scrapes group tours / nearby tours / independent travel listings for
    Sanya and Haikou, writing one pipe-separated record per route to a
    date-stamped result file.
    """

    def __init__(self):
        super(Ctrip, self).__init__()
        # One output file per day so repeated runs append to their own file.
        self.fileName = './result/ctrip/{}Ctrip.txt'.format(self.today)

    def getCtripPageInfo(self, typeNum, cityCode):
        """
        Parse the routes on the currently loaded listing page.

        Routes whose traveller count contains '万' (10k+) only show a rounded
        figure here, so they are collected and returned for detail-page
        scraping; all other routes are appended to the result file directly.
        Malformed route cards are skipped (best-effort scraping).

        :param typeNum: tour type — 1 group tour, 2 nearby tour, 3 independent
        :param cityCode: destination city code, e.g. 'sanya-61'
        :return: list of [routeId, retail, typeNum, cityCode] for 10k+ routes
        """
        routeList = []
        soup = BeautifulSoup(self.browser.page_source, "html.parser")
        routes = soup.find_all('div', attrs={'class': 'list_product_box js_product_item'})
        for route in routes:
            try:
                routeId = route['data-track-product-id']
                retail = route.find('p', attrs={'class': 'list_product_retail'})['title']
                title = route.find('p', attrs={'class': 'list_product_title'})['title']
                # The traveller count lives in one of two alternative containers.
                person = route.find('div', attrs={'class': 'list_change_one'}).get_text()
                if person == '':
                    person = route.find('div', attrs={'class': 'list_change_two'}).get_text()
                if '万' in person:
                    # 10k+ travellers: exact count only exists on the detail page.
                    routeList.append([routeId, retail, typeNum, cityCode])
                else:
                    with open(self.fileName, 'a', encoding='utf-8') as f:
                        f.write(str(typeNum) + '|' + cityCode + '|' + routeId + '|' + retail + '|' + title + '|' + person + '\n')
            except (KeyError, TypeError, AttributeError):
                # Incomplete/malformed card (missing tag or attribute):
                # skip it rather than abort the whole page.
                continue
        return routeList

    def getCtripRouteList(self, pageNum, cityCode, typeNum=1):
        """
        Collect route info across several listing pages.

        :param pageNum: number of result pages to scrape
        :param cityCode: city code — 'sanya-61' for Sanya, 'haikou-37' for Haikou
        :param typeNum: tour type — 1 group tour (default), 2 nearby, 3 independent
        :return: list of 10k+ routes for detail scraping; an empty (or partial)
                 list when scraping fails part-way — never ``None``, so callers
                 can iterate / take ``len()`` unconditionally.
        """
        allRouteList = []
        try:
            self.browser.get('https://vacations.ctrip.com/list/grouptravel/d-{}.html'.format(cityCode))
            time.sleep(3)
            self.checkElementExist('down')
            # NOTE(review): trailing space in this class name is preserved
            # verbatim from the original — confirm it is intentional.
            self.browser.find_element(By.CLASS_NAME, 'start_city_station ').click()
            time.sleep(1)
            self.checkElementExist('down')
            # Switch the departure city to Shanghai.
            self.browser.find_element(By.LINK_TEXT, '上海').click()
            time.sleep(1)
            self.checkElementExist('down')

            # typeNum == 1 (group tour) is the default tab — nothing to click.
            if typeNum == 2:
                # 3rd filter tab: nearby tours.
                self.browser.find_element(By.CSS_SELECTOR, '#root > div > div.vacation_bd > div.search_filtrate > div > a:nth-child(3)').click()
                time.sleep(3)
            elif typeNum != 1:
                # 4th filter tab: independent travel.
                self.browser.find_element(By.CSS_SELECTOR, '#root > div > div.vacation_bd > div.search_filtrate > div > a:nth-child(4)').click()
                time.sleep(3)
            time.sleep(2)
            self.checkElementExist('down')

            for _ in range(pageNum):
                allRouteList += self.getCtripPageInfo(typeNum, cityCode)
                # Scroll the "load more" button into view before clicking it.
                down = self.browser.find_element(By.CLASS_NAME, 'down')
                ActionChains(self.browser).move_to_element(down).perform()
                time.sleep(3)
                down.click()
                time.sleep(2)
                self.checkElementExist('down')

        except Exception as e:
            print(e)
            print(self.browser.current_url)

        # Always return a list: the original returned None on failure, which
        # crashed ``len(...)`` in getCtrip with a TypeError.
        return allRouteList

    def getRouteInfo(self, urlInfo):
        """
        Fetch the detail page of a 10k+ route and append its exact traveller
        count to the result file.

        :param urlInfo: [routeId, retail, typeNum, cityCode] as produced by
                        :meth:`getCtripPageInfo`
        """
        url = 'https://vacations.ctrip.com/travel/detail/p{}.html'.format(urlInfo[0])
        self.browser.get(url)
        if self.is_visible('score_inf'):
            title = self.browser.find_element(By.CLASS_NAME, 'detail_title_subhead').text
            person = self.browser.find_element(By.XPATH, '//*[@id="base_bd"]/div[1]/div/div[2]/div[1]/div[1]/div[1]/div/div[2]/div/span[3]').text
            with open(self.fileName, 'a', encoding='utf-8') as f:
                f.write(str(urlInfo[2]) + '|' + urlInfo[3] + '|' + urlInfo[0] + '|' + urlInfo[1] + '|' + title + '|' + person + '\n')
        else:
            # Detail page did not render in time; log the id so it can be retried.
            print(urlInfo[0])

    def checkElementExist(self, className, retry=3):
        """
        Ensure an element with the given class name is present, refreshing the
        page up to ``retry`` times before giving up.

        :param className: CSS class name to look for
        :param retry: maximum number of refresh attempts
        :return: True once the element is found
        :raises ValueError: when the element is still missing after all retries
        """
        for _ in range(retry):
            try:
                self.browser.find_element(By.CLASS_NAME, className)
                return True
            except Exception:
                self.browser.refresh()
                time.sleep(3)

        # Report the actual retry count (the original hard-coded "three").
        raise ValueError('{}：连续{}次刷新不成功'.format(self.browser.current_url, retry))

    def is_visible(self, locator, timeout=10):
        """Return True iff an element with class ``locator`` becomes visible
        within ``timeout`` seconds, False on timeout."""
        try:
            ui.WebDriverWait(self.browser, timeout).until(
                EC.visibility_of_element_located((By.CLASS_NAME, locator)))
            return True
        except TimeoutException:
            return False

    def getCtrip(self):
        """Run every (pages, city, tour-type) crawl pass and record results."""
        # (pageNum, cityCode, typeNum) for each pass — same six combinations
        # the original spelled out by hand.
        jobs = [
            (6, 'sanya-61', 1),
            (5, 'sanya-61', 2),
            (4, 'sanya-61', 3),
            (6, 'haikou-37', 1),
            (5, 'haikou-37', 2),
            (4, 'haikou-37', 3),
        ]
        for pageNum, cityCode, typeNum in jobs:
            # getCtripRouteList always returns a list, so iterate directly.
            for urlInfo in self.getCtripRouteList(pageNum, cityCode, typeNum):
                self.getRouteInfo(urlInfo)


if __name__ == '__main__':
    # Entry point: build the crawler and run every configured crawl pass.
    crawler = Ctrip()
    crawler.getCtrip()
