#!/usr/bin/env python3

import sys
import os
import time
import requests
from bs4 import BeautifulSoup

url = "http://www.tripadvisor.cn/Attractions-g60763-Activities-New_York_City_New_York.html"
urls = ["http://www.tripadvisor.cn/Attractions-g60763-Activities-oa{}-New_York_City_New_York.html#ATTRACTION_LIST".format(str(i)) for i in range(30, 990, 30)]
urls.insert(0, url)
urls_phone = ["http://www.tripadvisor.cn/Attractions-g60763-Activities-oa{}-New_York_City_New_York.html#ATTRACTION_LIST".format(str(i)) for i in range(20, 1000, 20)]
urls_phone.insert(0, url)

"""
http://www.tripadvisor.cn/Attractions-g60763-Activities-New_York_City_New_York.html
http://www.tripadvisor.cn/Attractions-g60763-Activities-oa30-New_York_City_New_York.html#ATTRACTION_LIST
http://www.tripadvisor.cn/Attractions-g60763-Activities-oa60-New_York_City_New_York.html#ATTRACTION_LIST

...

http://www.tripadvisor.cn/Attractions-g60763-Activities-oa930-New_York_City_New_York.html#ATTRACTION_LIST
http://www.tripadvisor.cn/Attractions-g60763-Activities-oa960-New_York_City_New_York.html#ATTRACTION_LIST
"""

url_save = "http://www.tripadvisor.cn/Saves#354898"
# 利用浏览器cookie伪造已登陆状态
headers = {
        "User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Cookie" : "TASession=%1%V2ID.9E042D49739CEFDACE32EC038D48803A*SQ.7*PR.39776%7C*LS.RecommendedAjax*GR.90*TCPAR.64*TBR.77*EXEX.22*ABTR.59*PPRP.55*PHTB.64*FS.12*CPU.96*HS.popularity*ES.popularity*AS.popularity*DS.5*SAS.popularity*FPS.oldFirst*TS.067878FE7811762AA83B1F9DD60E0C4C*FA.1*DF.0*LP.%2FSaves*TRA.true; TATravelInfo=V2*A.2*MG.-1*HP.2*FL.3*RS.1; TAUnique=%1%enc%3AbFQRvXohvK%2BDO0WOIu3pRTqrjbfhaHtMx%2FTsvoyCAMk%3D; ServerPool=C; TAUD=LA-1467033867730-1*LG-107066-2.1.F.*LD-107068-.....; TASSK=enc%3ALrOWo7J05apCz61DbqTcqFjUTtLrlhouVe%2BtPSlUrQanNNg3L4mPPKEO4xbNV13z; roybatty=APCh6YKHOEWD1yycbDUmurp26TX6HmQ%2BH83RWWS1JXTQ7POGLzpO4Q5QadT8s3%2BvNkioYelsIX70mW0SqjzzLxxQSZU2qiSzKmeWL07jLEvziic0gMaGGsM3DrOsFDM4YpD7GEURjhisgHGJQVBgH13IaQsPdnWGu8Eu46i%2FS%2B6C%2C1; NPID=; Hm_lvt_2947ca2c006be346c7a024ce1ad9c24a=1467033879; Hm_lpvt_2947ca2c006be346c7a024ce1ad9c24a=1467033974; _jzqa=1.3656830755788597000.1467033879.1467033879.1467033879.1; _jzqb=1.2.10.1467033879.1; _jzqc=1; _jzqckmp=1; ki_t=1467033886021%3B1467033886021%3B1467033975520%3B1%3B2; ki_r=; TAAuth2=%1%3%3A2731ab4558b9764815973b614431eb88%3AAMuvtu3ziuSKAOOxF5c9Bv%2FxjnSyZ4qmOKlejV%2FvEoDv27CAcwlhjFs14fUN7GJX7W5xHsV2GggP6Zt0mfwfICZMXdHks5hKJE5yTxX6%2Fp8xu%2BplfqKrT6MdIBW%2Fo6LyM4ScN%2FAd%2BPNitD0h4%2Bs%2BcBYk%2FYk%2FAWvKmnwcdULKWvNUX7XdhpbepRzTPpQcW5CiBFN%2Fj9bUu92jztS9V2Q9ylU%3D; _qzja=1.1567033035.1467033878929.1467033878929.1467033878930.1467033878930.1467033974583..0.0.2.1; _qzjb=1.1467033878929.2.0.0.0; _qzjc=1; _qzjto=2.1.0; CommercePopunder=SuppressAll*1467033979143"
        }

# 默认headers
headers_default = {
        "User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        }

headers_iphone = {
        "User-Agent" : "Mozilla/5.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/600.1.3 (KHTML, like Gecko) Version/8.0 Mobile/12A4345d Safari/600.1.4"
        }


def get_spots_on_phone(url, h = headers_iphone, data = None):
    """Scrape attraction title/image/tag dicts from one mobile listing page.

    The mobile site has fewer anti-scraping measures, so the image URL can
    be read directly from the markup (via the data-thumburl attribute).
    """
    response = requests.get(url, timeout = 10, headers = h)
    soup = BeautifulSoup(response.text, "lxml")

    # CSS selectors matching the mobile page layout.
    title_nodes = soup.select('div.container.containerLLR > div.title.titleLLR > div')
    image_nodes = soup.select('div.thumb.thumbLLR.soThumb > div')
    tag_nodes = soup.select("div.container.containerLLR > div.attraction_types > span")

    for title_node, image_node, tag_node in zip(title_nodes, image_nodes, tag_nodes):
        record = {
                "title": title_node.get_text().replace("\n", ""),
                "image": image_node.get("data-thumburl"),
                "tag": tag_node.get_text(),
                }
        print(record)


def get_spots(url, data = None):
    """Scrape attraction title/image/tag dicts from one desktop listing page."""
    response = requests.get(url, timeout = 10)
    soup = BeautifulSoup(response.text, "lxml")

    # Titles: <a target="_blank"> children of div.property_title.
    title_nodes = soup.select('div.property_title > a[target="_blank"]')
    # Images: every <img> carrying the width="160" attribute.
    image_nodes = soup.select('img[width="160"]')
    # Tags: div.p13n_reasoning_v2 containers.
    tag_nodes = soup.select("div.p13n_reasoning_v2")

    for title_node, image_node, tag_node in zip(title_nodes, image_nodes, tag_nodes):
        # NOTE: the "src" here is not the real image URL — the site injects
        # the real one via JS (anti-scraping); a regex over the JS could
        # recover it.
        print({
                "title": title_node.get_text(),
                "image": image_node.get("src"),
                "tag": list(tag_node.stripped_strings),
                })


def get_favs(url, data = None):
    """Scrape saved favourites (title/image/address).

    Requires the cookie-bearing `headers` so the site treats us as logged in.
    """
    response = requests.get(url, headers = headers)
    soup = BeautifulSoup(response.text, 'lxml')

    entries = zip(
        soup.select("a.location-name"),
        soup.select("img.photo_image"),
        soup.select("span.format_address"),
    )
    for title_node, image_node, address_node in entries:
        print({
                'title': title_node.get_text(),
                'image' : image_node.get("src"),
                'address' : address_node.get_text().strip()
                })

def getencoding(response):
    """Re-detect the text encoding of an HTTP response.

    requests falls back to ISO-8859-1 when the server declares no charset;
    in that case prefer a charset declared inside the page content, then
    the content-sniffed guess (apparent_encoding).
    """
    # A non-ISO-8859-1 value means the server declared it explicitly: trust it.
    if response.encoding != 'ISO-8859-1':
        return response.encoding

    declared = requests.utils.get_encodings_from_content(response.text)
    if declared:
        return declared[0]
    return response.apparent_encoding


def getcontent(url):
    """Download `url` and return its decoded text, or "" on any failure.

    Fixes: adds a timeout (consistent with the other requests in this file —
    without one a stalled connection hangs the whole scrape), drops the
    redundant `status_code != 200` guard (raise_for_status() already raises
    for 4xx/5xx), and reports failures instead of swallowing them silently.
    """
    try:
        response = requests.get(url, timeout = 10)
        response.raise_for_status()
        # Fix up the encoding before reading .text so Chinese pages decode correctly.
        response.encoding = getencoding(response)
    except Exception as e:
        # Best-effort: the caller saves "" for failed pages, but at least log why.
        print("failed to fetch", url, "-", e)
        return ""

    return response.text


def savefile(path, content):
    """Write `content` to `path` as UTF-8 text; remove the partial file on failure.

    Fix: the original opened the file without an explicit encoding, so writing
    the scraped Chinese pages crashed (and the crash was silently swallowed)
    on platforms whose locale encoding is not UTF-8. Failures are now reported.
    """
    try:
        print("save to", path)
        with open(path, 'w', encoding = 'utf-8') as f:
            f.write(content)
    except Exception as e:
        print("failed to save", path, "-", e)
        # Don't leave a truncated/partial file behind.
        if os.path.exists(path):
            os.remove(path)

# Fix: the mobile save directory must exist, otherwise every save below
# fails inside savefile's except clause and no page is kept.
os.makedirs("./web/phone", exist_ok = True)

# Download and save the raw desktop pages (filename = last URL segment).
for url in urls:
    content = getcontent(url)
    savefile(url[url.rfind("/") + 1: ], content)

# Download and save the raw mobile pages.
for url in urls_phone:
    content = getcontent(url)
    savefile("./web/phone/" + url[url.rfind("/") + 1: ], content)

# Parse attraction info from the desktop pages.
for url in urls:
    get_spots(url)  # fix: was get_spots(urls) — passed the whole list instead of one URL
    print("\n\n\n", url)
    time.sleep(2)  # throttle requests to avoid triggering anti-scraping
