import requests
import json
import os
import pandas as pd
from tqdm import tqdm

# Parallel lists populated by read_pois(): poiIds[i] belongs to poiNames[i].
poiIds = []
poiNames = []


def read_pois(path="poiIds.txt"):
    """Populate the module-level poiIds/poiNames lists from a text file.

    Each non-blank line is expected to look like "<poiId> ... <poiName>":
    the first whitespace-separated token is the POI id and the last token
    is the POI name (middle tokens, if any, are ignored).

    Args:
        path: input file to read; defaults to the original hard-coded
            "poiIds.txt" so existing callers are unaffected.
    """
    global poiIds
    global poiNames
    with open(path, "r", encoding="utf-8") as fh:
        # Iterate the file lazily instead of materializing readlines().
        for raw in fh:
            line = raw.strip()
            if not line:
                # Skip blank lines; the old code appended '' for them.
                continue
            parts = line.split(' ')
            poiIds.append(parts[0])
            poiNames.append(parts[-1])


def craw(poiId, poiName):
    """Scrape up to 15 pages of Ctrip comments for one POI and save to Excel.

    Args:
        poiId: Ctrip POI id placed into the request payload.
        poiName: POI display name; used in the progress bar and as the
            output file name.

    Side effects:
        Writes res_data/<poiName>.xlsx with columns: comment content,
        user name, comment time, user IP zone.
    """
    userNames = []
    commentDetails = []
    commentTimes = []
    ipZones = []

    total_pages = 15
    # Loop-invariant endpoint hoisted out of the page loop.
    postUrl = "https://m.ctrip.com/restapi/soa2/13444/json/getCommentCollapseList"

    for pagen in tqdm(range(total_pages), desc=poiName + ' - 评论数据 - 爬取进度', unit='页'):
        payload = {
            "arg": {
                "channelType": 2,
                "collapseTpte": 0,
                "commentTagId": 0,
                "pageSize": 50,
                "poiId": poiId,  # the POI being scraped
                "sourseType": 1,
                "sortType": 3,
                "pageIndex": pagen,
                "starType": 0
            },
            "head": {
                "cid": "09031062417234242897",
                "ctok": "",
                "cver": "1.0",
                "lang": "01",
                "sid": "888",
                "syscode": "09",
                "auth": "",
                "xsid": "",
                "extension": []
            }
        }

        try:
            # timeout prevents an unresponsive server from hanging the
            # scraper forever (the old call had no timeout at all).
            resp = requests.post(postUrl, data=json.dumps(payload), timeout=15)
            page = resp.json()
        except (requests.RequestException, ValueError):
            # Best-effort: skip this page on network or JSON-decode errors.
            continue

        # .get() chain: the old code raised KeyError when "result" was
        # missing from the response.
        items = page.get("result", {}).get("items") or []

        for item in items:
            # The API pads pages with None entries; skip them up front.
            if item is None:
                continue
            try:
                userInfo = item['userInfo']
                if 'userNick' not in userInfo:
                    continue
                userName = userInfo['userNick']
                commentDetail = item['content']
                commentTime = item['publishTypeTag'].split(' ')[0]
                ipZone = item['ipLocatedName']
            except (KeyError, TypeError, AttributeError):
                # Narrowed from a bare `except Exception: pass`: malformed
                # items are still skipped, but unrelated bugs now surface.
                continue

            # Only append once all four fields resolved, keeping the four
            # result lists the same length (same invariant as before).
            userNames.append(userName)
            commentDetails.append(commentDetail)
            commentTimes.append(commentTime)
            ipZones.append(ipZone)

    df = pd.DataFrame({
        '用户评论内容': commentDetails,
        '用户名': userNames,
        '用户评论时间': commentTimes,
        '用户属地': ipZones
    })

    # Ensure the output directory exists; to_excel does not create it.
    os.makedirs('res_data', exist_ok=True)
    df.to_excel(os.path.join('res_data', poiName + '.xlsx'), index=False)


if __name__ == '__main__':
    # Load the POI id/name pairs from poiIds.txt.
    read_pois()
    # Scrape each POI; the two module lists are parallel, so zip pairs
    # each id with its name (replaces the old range(len(...)) indexing).
    for poi_id, poi_name in zip(poiIds, poiNames):
        craw(poi_id, poi_name)
