from bs4 import BeautifulSoup
import re
import os
import requests
import csv

# I didn't know how to write a crawler — learned this from my team lead's code.
# Surprisingly, the "want to go" / "have been" counts could not be scraped with requests,
# because the page was not fully loaded at fetch time and requests cannot wait for JS;
# other tools felt less convenient than requests, so those two datasets are entered by hand.
#sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')
city_list = ['北京', '重庆', '上海', '天津', '成都',
             '三亚', '杭州', '武汉', '广州', '西安',
             '贵阳', '苏州', '昆明', '南京', '桂林',
             '张家界', '丽江', '哈尔滨', '拉萨', '长沙']  # city names
want_list = ['124893', '61952', '108922', '31553', '87077',
             '198556', '95941', '34700', '49965', '79685',
             '5063', '56507', '80444', '51122', '84654',
             '78677', '211244', '70042', '127367', '38075']  # "want to go" counts (entered manually, see note above)
went_list = ['1020374', '378010', '984634', '381110', '453206',
             '440141', '750131', '336533', '528549', '434636',
             '60079', '515347', '363437', '544254', '266639',
             '163828', '374215', '207866', '98930', '273566']  # "have been" counts (entered manually)

# Per-page scrape accumulators; cleared and refilled inside getdata() for each quarter.
name_list = []  # travel-note titles
date_list = []  # trip dates
time_list = []  # trip durations
cost_list = []  # trip costs
whom_list = []  # travel companions
read_list = []  # view counts
like_list = []  # like counts
chat_list = []  # reply counts
node_list = ['beijing1', 'chongqing158', 'shanghai2', 'tianjin154', 'chengdu104',
             'sanya61', 'hangzhou14', 'wuhan145', 'guangzhou152', 'xian7',
             'guiyang33', 'suzhou11', 'kunming29', 'nanjing9', 'guilin28',
             'zhangjiajie23', 'lijiang32', 'harbin151', 'lhasa36', 'changsha148'] # manually collected Ctrip node IDs used in the listing URLs (parallel to city_list)



# HTTP headers mimicking a desktop Edge browser so Ctrip serves the normal page.
headers = {
    "authority": "you.ctrip.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "sec-ch-ua": "^\\^Not_A",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "^\\^Windows^^",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "same-origin",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 Edg/109.0.1518.69"}


def getdata():
    """Crawl Ctrip travel-note listings for every city in ``city_list``.

    For each city and each quarter (m = 1..4), fetches the paginated listing
    pages, extracts per-note fields with regexes, lightly filters ads, and
    writes one CSV per city/quarter under ./游记数据/<city>/.

    Side effects: creates directories, writes CSV files, performs network I/O,
    and mutates the module-level accumulator lists.
    """
    for i in range(len(city_list)):
        # Create the per-city output directory on first use.
        if not os.path.exists('./游记数据/' + city_list[i]):
            os.mkdir(r'./游记数据/' + city_list[i])
        for m in range(1, 5):  # m = quarter (1..4), encoded in the URL as -mN
            # Reset the shared accumulators for this city/quarter.
            name_list.clear()
            date_list.clear()
            time_list.clear()
            cost_list.clear()
            whom_list.clear()
            read_list.clear()
            like_list.clear()
            chat_list.clear()

            # BUG FIX: the original hard-coded 'beijing1' here, so the page
            # count for EVERY city was taken from Beijing's listing. Probe the
            # current city's own listing instead.
            url = 'https://you.ctrip.com/travels/' + node_list[i] + '/t2-m' + str(m) + '.html'
            page_text = requests.get(url=url, headers=headers, timeout=(100, 100))
            html = page_text.content.decode('utf-8', 'ignore')
            soup = BeautifulSoup(html, 'lxml')

            # Total page count is displayed in a <b class="numpage"> element.
            page = int(soup.find_all('b', class_="numpage")[0].text) - 1
            #"""
            for p in range(page):
                print(city_list[i] + str(m) + ':Page' + str(p) + '/' + str(page))

                url = 'https://you.ctrip.com/travels/' + node_list[i] + '/t2-p' + str(p) + '-m' + str(m) + '.html'
                page_text = requests.get(url=url, headers=headers, timeout=(100, 100))
                html = page_text.content.decode('utf-8', 'ignore')
                soup = BeautifulSoup(html, 'lxml')

                # One entry per travel note; the five lists below are parallel.
                name_text = soup.find_all('dt', class_="ellipsis")
                tag_text = soup.find_all('span', class_="tips_a")
                read_text = soup.find_all('i', class_="numview")
                like_text = soup.find_all('i', class_="want")
                chat_text = soup.find_all('i', class_="numreply")

                r = len(name_text)
                for j in range(r):
                    tag = tag_text[j].text
                    # Trip duration, e.g. "3天" (single digit + 天 as in the original pattern).
                    time = re.search(r'.天', str(tag))
                    if time is None:
                        time = ''
                    else:
                        time = time.group()
                    # Trip date, e.g. "2023-01-02 10:20:30".
                    date = re.search(r'20\d\d-\d\d-\d\d \d\d:\d\d:\d\d', str(tag))
                    if date is None:
                        date = ''
                    else:
                        date = date.group()
                    # Trip cost, e.g. "￥1500".
                    cost = re.search(r'￥\d*', str(tag))
                    if cost is None:
                        cost = ''
                    else:
                        cost = cost.group()
                    if cost == '￥1':  # crude ad filter — removes roughly 25% of entries
                        continue
                    # Travel companions: family/parents/friends/solo/couple.
                    whom = re.search(r'亲子|和父母|和朋友|一个人|夫妻|情侣', str(tag))
                    if whom is None:
                        whom = ''
                    else:
                        whom = whom.group()

                    name = name_text[j].text
                    read = read_text[j].text
                    like = like_text[j].text
                    chat = chat_text[j].text

                    # GBK round-trip drops characters the CSV consumer (Excel/GBK
                    # locale) cannot represent.
                    name_list.append(name.encode('gbk', 'ignore').decode('gbk'))
                    date_list.append(date.encode('gbk', 'ignore').decode('gbk'))
                    time_list.append(time.encode('gbk', 'ignore').decode('gbk'))
                    cost_list.append(cost.encode('gbk', 'ignore').decode('gbk'))
                    whom_list.append(whom.encode('gbk', 'ignore').decode('gbk'))
                    read_list.append(read.encode('gbk', 'ignore').decode('gbk'))
                    like_list.append(like.encode('gbk', 'ignore').decode('gbk'))
                    chat_list.append(chat.encode('gbk', 'ignore').decode('gbk'))

            # Dump this city/quarter's accumulated rows to CSV.
            data = ['游记名称','出游日期','出游时长','旅行花费','和谁出游','浏览','喜欢','回复']
            with open('./游记数据/' + city_list[i] + '/第' + str(m) + '季度.csv', 'w+', newline='') as csvfile:
                writer = csv.writer(csvfile, dialect='excel')
                writer.writerow(data)
                r = len(name_list)
                for j in range(r):
                    data = [name_list[j], date_list[j], time_list[j], cost_list[j],
                            whom_list[j], read_list[j], like_list[j], chat_list[j]]
                    writer.writerow(data)
            print(city_list[i] + str(m) + 'Finished')
        print(city_list[i] + 'Finished')
        #print('数据总条数：'+str(total))
            #"""


def getcsv():
    """Write the per-city popularity counts (want-to-go / have-been) to ./热度.csv."""
    with open('./热度.csv', 'w+', newline='') as csvfile:
        writer = csv.writer(csvfile, dialect='excel')
        # Header row, then one row per city in list order.
        writer.writerow(['城市', '想去', '去过'])
        for city, want, went in zip(city_list, want_list, went_list):
            writer.writerow([city, want, went])

def gettotal():
    """Print the total line count across all per-city quarterly CSV files.

    Iterates every city and quarter, printing progress for each file, then
    prints the grand total (header rows included, one per file).
    """
    total = 0
    for i in range(len(city_list)):
        for m in range(1, 5):
            print(city_list[i] + str(m))
            # FIX: the original used open(...).readlines() without closing,
            # leaking 80 file handles; a with-block closes each one promptly.
            path = './游记数据/' + city_list[i] + '/第' + str(m) + '季度.csv'
            with open(path) as f:
                total = total + len(f.readlines())
    print(total)
# Guard the entry point so importing this module does not start the crawl.
if __name__ == '__main__':
    getdata()
    # getcsv()  # popularity CSV is already generated; uncomment to regenerate
    gettotal()  # total = 38046