import os
import pandas as pd
import requests
from bs4 import BeautifulSoup as Bsoup
import json

def merge_sight_csv(csv_folder=None, out_path=None):
    """Merge every per-sight CSV file into a single ``ultimate_sight.csv``.

    Args:
        csv_folder: Directory containing the source ``.csv`` files.
            Defaults to ``../static/spider/data/sight`` relative to this file.
        out_path: Destination path for the merged CSV. Defaults to
            ``../static/spider/data/ultimate_sight.csv`` relative to this file.

    Returns:
        The path the merged CSV was written to.
    """
    base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    if csv_folder is None:
        # Replace with the actual target folder path if the layout differs.
        csv_folder = os.path.join(base_dir, 'static/spider/data/sight')
    if out_path is None:
        out_path = os.path.join(base_dir, 'static/spider/data', 'ultimate_sight.csv')

    frames = []
    for csv_file in os.listdir(csv_folder):
        if csv_file.endswith('.csv'):
            csv_path = os.path.join(csv_folder, csv_file)
            print(csv_path)
            frames.append(pd.read_csv(csv_path, encoding='utf-8'))

    # DataFrame.append() was removed in pandas 2.0; a single pd.concat over the
    # collected frames is the supported (and linear-time) replacement.
    combined_csv = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    combined_csv.to_csv(out_path, index=False, encoding='utf-8')
    return out_path

def merge_city_csv(csv_folder=None, out_path=None):
    """Merge every per-city CSV file into a single ``ultimate_city.csv``.

    Args:
        csv_folder: Directory containing the source ``.csv`` files.
            Defaults to ``../static/spider/data/city`` relative to this file.
        out_path: Destination path for the merged CSV. Defaults to
            ``../static/spider/data/ultimate_city/ultimate_city.csv`` relative
            to this file (the original output location, kept unchanged).

    Returns:
        The path the merged CSV was written to.
    """
    base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    if csv_folder is None:
        # Replace with the actual target folder path if the layout differs.
        csv_folder = os.path.join(base_dir, 'static/spider/data/city')
    if out_path is None:
        out_path = os.path.join(base_dir, 'static/spider/data/ultimate_city', 'ultimate_city.csv')

    frames = []
    for csv_file in os.listdir(csv_folder):
        if csv_file.endswith('.csv'):
            csv_path = os.path.join(csv_folder, csv_file)
            print(csv_path)
            frames.append(pd.read_csv(csv_path, encoding='utf-8'))

    # DataFrame.append() was removed in pandas 2.0; a single pd.concat over the
    # collected frames is the supported (and linear-time) replacement.
    combined_csv = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
    combined_csv.to_csv(out_path, index=False, encoding='utf-8')
    return out_path
# merge_city_csv()
#
# def merge_comment_csv():
#     csv_folder = os.path.abspath(
#         os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/comment'))  # 替换成实际的目标文件夹路径
#     csv_files = os.listdir(csv_folder)
#     combined_csv = pd.DataFrame()
#     for csv_file in csv_files:
#         if csv_file.endswith('.csv'):
#             csv_path = os.path.join(csv_folder, csv_file)
#             print(csv_path)
#             df = pd.read_csv(csv_path, encoding='utf-8')
#             combined_csv = combined_csv.append(df, ignore_index=True)
#
#     r_csv_name = 'ultimate_city.csv'
#     r_csv_path = os.path.join(os.path.abspath(
#         os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/ultimate_comment')),
#         r_csv_name)
#     combined_csv.to_csv(r_csv_path, index=False, encoding='utf-8')


def sight_comment_csv(businessId):
    """Fetch and flatten the Ctrip comment list for a sight.

    Requests the mobile Ctrip comment page for ``businessId``, extracts the
    ``__NEXT_DATA__`` JSON payload embedded in the returned HTML, and flattens
    the comment list into a dict of parallel lists (one key per column),
    ready to be passed to ``pandas.DataFrame``.

    Args:
        businessId: Ctrip business id of the sight whose comments to fetch.

    Returns:
        dict[str, list]: column name -> list of values, one entry per comment.

    Raises:
        requests.RequestException: if both the direct request and the
            proxied retry fail (or time out).
        AttributeError, KeyError: if the page layout changed and the expected
            ``__NEXT_DATA__`` payload is missing.
    """
    # Session cookies captured from a real browser visit; likely to expire.
    cookies = {
        'GUID': '09031047217480507018',
        'nfes_isSupportWebP': '1',  # appeared twice in the captured cookie; duplicate removed
        'ibu_h5_lang': 'en',
        'ibu_h5_local': 'en-us',
        '_RSG': '_Jq487AH8pAesaRsfgT64B',
        '_RDG': '28c388f1743cd62eda217a961d6f89838f',
        '_RGUID': 'acc844a1-c813-44ff-b106-76aa766e4da8',
        'Union': 'OUID=&AllianceID=66672&SID=1693366&SourceID=&AppID=&OpenID=&exmktID=&createtime=1679131250&Expires=1679736050225',
        '_RF1': '173.82.245.30',
        'Hm_lvt_37b54c42b9dde393e60c88c1a84657cb': '1679131247,1679311080',
        'librauuid': '',
        'hotelhst': '1164390341',
        'Hm_lpvt_37b54c42b9dde393e60c88c1a84657cb': '1679311126',
        '_bfa': '1.1679131245585.3ilkh3.1.1679311128708.1679311133651.3.15.0',
        '_bfs': '1.1',
        '_ubtstatus': '%7B%22vid%22%3A%221679131245585.3ilkh3%22%2C%22sid%22%3A3%2C%22pvid%22%3A15%2C%22pid%22%3A0%7D',
        '_bfaStatusPVSend': '1',
        '_bfi': 'p1%3D10650064020%26p2%3D0%26v1%3D15%26v2%3D0',
        '_bfaStatus': 'success',
        '_pd': '%7B%22_o%22%3A3%2C%22s%22%3A66%2C%22_s%22%3A0%7D',
    }

    # Browser-like headers copied from a real request to avoid being blocked.
    headers = {
        'authority': 'm.ctrip.com',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cache-control': 'max-age=0',
        'if-none-match': '"10b7a-LItLBpOtqgeu4Ohauety+rpbdOg"',
        'referer': 'https://m.ctrip.com/webapp/you/gspoi/sight/25/0.html?poiId=10537405&seo=0&isHideNavBar=YES',
        'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
    }

    params = {
        'seo': '0',
        'businessId': businessId,
        'businessType': '11',
        'hideStatusBar': '1',
        'openapp': '5',
        'poiId': '10537405',
        'noJumpApp': 'yes',
        'from': 'https://m.ctrip.com/webapp/you/gspoi/sight/25/0.html?seo=0&poiId=10537405&isHideNavBar=YES',
    }

    url = 'https://m.ctrip.com/webapp/you/commentWeb/commentList'
    try:
        # Narrowed from a bare except: only retry on network-level failures,
        # and time out instead of hanging indefinitely.
        response = requests.get(url, params=params, cookies=cookies,
                                headers=headers, timeout=30)
    except requests.RequestException:
        # Direct access failed; retry through a local proxy (e.g. Clash on 7890).
        proxies = {
            'https': '127.0.0.1:7890'
        }
        response = requests.get(url, params=params, cookies=cookies,
                                headers=headers, proxies=proxies, timeout=30)

    soup = Bsoup(response.text, 'html.parser')
    # The comment data is embedded as JSON in the Next.js __NEXT_DATA__ script tag.
    script_data = json.loads(soup.find('script', attrs={'id': '__NEXT_DATA__'}).text)
    comment_list = script_data['props']['pageProps']['initialState']['commentList']

    # Column order matters: it becomes the column order of the resulting CSV.
    columns = [
        'commentId', 'userId', 'userNick', 'userImage', 'userMember', 'url',
        'resourceId', 'resourceType', 'districtId', 'usefulCount', 'score',
        'images_num', 'content', 'jumpH5Url', 'childrenTag', 'ipLocatedName',
        'ip', 'lastModifyTime',
    ]
    to_data = {column: [] for column in columns}

    for comment in comment_list:
        user_info = comment['userInfo']
        to_data['commentId'].append(comment['commentId'])
        to_data['userId'].append(user_info['userId'])
        to_data['userNick'].append(user_info['userNick'])
        to_data['userImage'].append(user_info['userImage'])
        to_data['userMember'].append(user_info['userMember'])
        to_data['url'].append(user_info['url'])
        to_data['resourceId'].append(comment['resourceId'])
        to_data['resourceType'].append(comment['resourceType'])
        to_data['districtId'].append(comment['districtId'])
        to_data['usefulCount'].append(comment['usefulCount'])
        to_data['score'].append(comment['score'])
        to_data['images_num'].append(len(comment['images']))
        to_data['content'].append(comment['content'])
        to_data['jumpH5Url'].append(comment['jumpH5Url'])
        to_data['childrenTag'].append(comment['childrenTag'])
        to_data['ipLocatedName'].append(comment['ipLocatedName'])
        # 'ip' is not present on every comment; default to '' (was try/bare-except).
        to_data['ip'].append(comment.get('ip', ''))
        to_data['lastModifyTime'].append(comment['lastModifyTime'])

    print(to_data)
    return to_data
# sight_comment_csv()

# r_csv_name = 'ultimate_sight.csv'
# r_csv_path = os.path.join(os.path.abspath(
#     os.path.dirname(__file__)),
#     r_csv_name)
# sight_datas = pd.read_csv(r_csv_path, encoding='utf-8').to_dict(orient='record')
# for index in range(0, len(sight_datas)):
#     print('index: ', index)
#     sight_data = sight_datas[index]
#     businessId = sight_data['businessId']
#     try:
#         business_data = sight_comment_csv(businessId)
#     except:
#         print('{}无数据'.format(sight_data['businessId']))
#         continue
#     w_csv_name = '{}_sight_comment.csv'.format(businessId)
#     w_csv_path = os.path.join(os.path.abspath(
#         os.path.join(os.path.join(os.path.dirname(__file__), ".."), 'static/spider/data/comment')),
#         w_csv_name)
#     df = pd.DataFrame(business_data)
#     df.to_csv(w_csv_path, index=False, encoding='utf-8')