import csv
import os.path
import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

def startBrower():
    """Attach a Selenium driver to an already-running Chrome instance.

    Chrome must have been started beforehand with
    ``--remote-debugging-port=9222``; Selenium connects to that session
    instead of launching a fresh browser.

    Returns:
        webdriver.Chrome: driver bound to the existing Chrome session.
    """
    service = Service('./chromedriver.exe')
    option = webdriver.ChromeOptions()
    # Reuse the Chrome listening on the local DevTools port.
    option.add_experimental_option("debuggerAddress", "localhost:9222")
    # Bug fix: a UA string must be passed as a "user-agent=" switch; the
    # original bare string was silently ignored by Chrome as an unknown flag.
    # NOTE(review): when attaching via debuggerAddress most launch arguments
    # have no effect anyway, since the browser is already running.
    option.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36")
    browser = webdriver.Chrome(service=service, options=option)
    return browser


def spider_out(detail):
    """Scrape taste/environment/service ratings from one detail page.

    Opens *detail* in the attached browser, parses the score text (expected
    format like "口味:4.5 环境:4.3 服务:4.4"), and appends one row
    [detail, tasterate, envsrate, serverate] to detailList.csv.

    Args:
        detail: URL of the shop detail page.
    """
    print('详情页URL:' + detail)
    browser = startBrower()
    # Bug fix: defaults must be set BEFORE the try block. In the original,
    # a score text missing one category (or an exception after partial
    # assignment) left a variable undefined, raising NameError at the
    # save_to_csv call below, which sits outside the except handler.
    tasterate = 0
    envsrate = 0
    serverate = 0
    try:
        browser.get(detail)
        time.sleep(1)  # crude wait for the score element to render
        all_join = browser.find_element(by=By.XPATH, value='//span[@class="scoreText wx-text"]').text
        print(all_join)
        for part in all_join.split():
            key, value = part.split(':')
            if '口味' in key:          # taste
                tasterate = float(value)
            elif '环境' in key:        # environment
                envsrate = float(value)
            elif '服务' in key:        # service
                serverate = float(value)
    except Exception as exc:
        # Narrowed from a bare except; log instead of swallowing silently
        # so scraping failures remain visible. Row still gets the 0 defaults.
        print('parse failed for', detail, '->', exc)
    finally:
        # Always release the driver, even if parsing or saving fails.
        browser.quit()

    save_to_csv([detail, tasterate, envsrate, serverate])


def init():
    """Create detailList.csv with its header row unless it already exists."""
    if os.path.exists('./detailList.csv'):
        return  # never clobber previously collected rows
    with open('detailList.csv', 'w', newline='', encoding='utf-8') as out_file:
        header = ['detailLink', 'tasterate', 'envsrate', 'serverate']
        csv.writer(out_file).writerow(header)

def save_to_csv(rowData):
    """Append a single record to detailList.csv (created if absent)."""
    with open('detailList.csv', 'a', newline='', encoding='utf-8') as out_file:
        csv.writer(out_file).writerow(rowData)


if __name__ == '__main__':
    # Bug fix: init() was defined but never called, so on a fresh run
    # detailList.csv was created by the append in save_to_csv WITHOUT a
    # header row. Ensure the header exists before scraping starts.
    init()
    df = pd.read_csv('dataList.csv')
    # Column index 9 (the 10th column) is assumed to hold the detail-page
    # URLs — TODO confirm against how dataList.csv is produced.
    detail_links = df.iloc[:, 9].tolist()
    for detail in detail_links:
        spider_out(detail)
