from time import sleep

import requests
from lxml import etree
import pandas as pd

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.69'
}


def main():
    """Crawl every listing page, gather scenic-spot rows, and persist them as CSV."""
    rows = []
    for page_url in join_url():
        print(page_url)
        rows.extend(collect_data(spider_data(page_url)))
        sleep(1)  # be polite: pause one second between page requests
    save_data(rows)


def join_url():
    """Build the Ctrip Guangzhou sight listing URLs for pages 1 through 200."""
    template = 'https://you.ctrip.com/sight/guangzhou152/s0-p{}.html'
    return [template.format(page) for page in range(1, 201)]


def spider_data(url, timeout=10):
    """Fetch one listing page and return its scenic-spot card elements.

    Args:
        url: A listing-page URL as produced by ``join_url``.
        timeout: Seconds to wait for the HTTP response. New, defaulted
            parameter — the original call had no timeout, so a stalled
            server would block the crawl forever.

    Returns:
        List of lxml ``<div class="list_mod2">`` elements, one per spot.

    Raises:
        requests.exceptions.Timeout: if the server does not respond in time.
        IndexError: if the page layout does not match the expected XPath.
    """
    # Always pass a timeout: requests.get without one can hang indefinitely.
    response = requests.get(url, headers=headers, timeout=timeout)
    html_doc = etree.HTML(response.text)
    # NOTE(review): absolute positional XPath is brittle — it breaks if Ctrip
    # changes the page layout; kept as-is since it matches the current markup.
    container = html_doc.xpath('/html/body/div[2]/div[2]/div[4]/div/div[2]/div/div[3]')[0]
    return container.xpath('.//div[@class="list_mod2"]')


def collect_data(div_list):
    """Extract one data row from each scenic-spot listing card.

    Args:
        div_list: Iterable of ``list_mod2`` card elements from ``spider_data``
            (anything exposing an lxml-style ``.xpath()`` method).

    Returns:
        List of rows ``[name, heat, address, score, star_level,
        number_of_comments, comment]``. Cards missing any expected
        field are skipped entirely (no partial rows).
    """
    data = []
    for card in div_list:
        try:
            info = card.xpath('./div[@class="rdetailbox"]')[0]
            data.append([
                info.xpath('./dl/dt/a[1]/text()')[0],                 # name
                info.xpath('./dl/dt/a[2]/b[2]/text()')[0],            # heat
                info.xpath('./dl/dd[@class="ellipsis"]/text()')[0],   # address
                info.xpath('./ul/li[1]/a/strong/text()')[0],          # score
                info.xpath('./ul/li[2]/span/span/@style')[0],         # star_level
                info.xpath('./ul/li[3]/a/text()')[0],                 # number_of_comments
                card.xpath('./p/text()')[1],                          # comment
            ])
        except IndexError:
            # Only the expected failure — an empty xpath result indexed with
            # [0]/[1] — is tolerated. The original bare `except:` also hid
            # unrelated bugs (typos, keyboard interrupts, etc.).
            continue
    return data


def save_data(data, path='../static/data/scenic_spots_info.csv'):
    """Write the scraped rows to a CSV file with a UTF-8 BOM.

    Args:
        data: Rows of ``[name, heat, address, score, star_level,
            number_of_comments, comment]`` as produced by ``collect_data``.
        path: Output CSV path. New, defaulted parameter generalizing the
            previously hard-coded location — existing callers are unchanged.
    """
    df = pd.DataFrame(
        data,
        columns=['name', 'heat', 'address', 'score', 'star_level',
                 'number_of_comments', 'comment'],
    )
    # utf_8_sig writes a BOM so Excel detects the encoding of Chinese text.
    df.to_csv(path, index=False, encoding='utf_8_sig')


if __name__ == '__main__':
    main()
