import csv
import os.path
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

# service = Service('./chromedriver.exe')
# browser = webdriver.Chrome(service = service)
# option = webdriver.ChromeOptions
# browser.get('https://www.baidu.com/')

def startBrower():
    """Attach a Chrome WebDriver to an already-running browser instance.

    The browser must have been launched beforehand with
    ``--remote-debugging-port=9222``; we connect to it through the
    DevTools ``debuggerAddress`` option instead of spawning a new window.

    Returns:
        A connected ``webdriver.Chrome`` instance.
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_experimental_option("debuggerAddress", "localhost:9222")
    driver_service = Service('./chromedriver.exe')
    return webdriver.Chrome(service=driver_service, options=chrome_options)

def spiderArticleMain(url, city, typeV, typeK, pageStart, pageEnd):
    """Scrape shop listings from Dianping list pages and append them to the CSV.

    Args:
        url: list-page URL template with two placeholders:
            ``{0}`` = category id, ``{1}`` = page number.
        city: city name stored in the first CSV column.
        typeV: category id substituted into the URL (e.g. ``'g112'``).
        typeK: human-readable category name stored in the ``totalType`` column.
        pageStart: first page to fetch (inclusive).
        pageEnd: last page bound (exclusive), per ``range`` semantics.
    """
    browser = startBrower()
    try:
        for page in range(pageStart, pageEnd):
            page_url = url.format(typeV, page)
            print(page)
            print('列表页码URL:' + page_url)
            browser.get(page_url)
            time.sleep(10)  # crude rate limit; also lets the JS-rendered list load
            lis = browser.find_elements(by=By.XPATH, value='//div[@id="shop-all-list"]/ul/li')
            for item in lis:
                try:
                    title = item.find_element(by=By.XPATH, value='./div[@class="txt"]/div[@class="tit"]/a/h4').text
                    # 'shop_type' instead of 'type' to avoid shadowing the builtin.
                    shop_type = item.find_element(by=By.XPATH, value='./div[@class="txt"]/div[@class="tag-addr"]/a[1]/span').text
                    address = item.find_element(by=By.XPATH, value='./div[@class="txt"]/div[@class="tag-addr"]/a[2]/span').text
                    cover = item.find_element(by=By.XPATH, value='./div[@class="pic"]/a/img').get_attribute("src")
                    totalComment = item.find_element(by=By.XPATH, value='./div[2]/div[2]/a[1]/b').text
                    # Star rating is encoded in the CSS class, e.g. "... star_45" -> "45".
                    star_class = item.find_element(by=By.XPATH, value='.//*[@class="star_icon"]/span[1]').get_attribute("class")
                    star = star_class.split(" ")[1].split("_")[-1]
                    # Price text starts with a currency symbol; strip the first char.
                    avgPrice = item.find_element(by=By.XPATH, value='./div[2]/div[2]/a[2]/b').text[1:]
                    detailLink = item.find_element(by=By.XPATH, value='./div[@class="pic"]/a').get_attribute("href")
                    resultData = [city, title, shop_type, address, cover,
                                  totalComment, star, avgPrice, typeK, detailLink]
                    save_to_csv(resultData)
                    print(title, shop_type, address, cover, totalComment,
                          star, avgPrice, typeK, detailLink)
                except Exception as exc:
                    # One malformed listing (missing element, layout change) must
                    # not abort the whole page — log it instead of a bare except.
                    print('skip item:', exc)
                    continue
    finally:
        # Release the driver even if a page-level get()/find raises.
        browser.quit()
def save_to_csv(rowData, path='dataList.csv'):
    """Append a single row to the CSV data file.

    Args:
        rowData: sequence of cell values for one row.
        path: target CSV file; defaults to the shared ``dataList.csv``
            so existing callers are unaffected.
    """
    # newline='' stops the csv module emitting blank lines on Windows.
    with open(path, 'a', newline='', encoding='utf-8') as wf:
        csv.writer(wf).writerow(rowData)

def init():
    """Create ``dataList.csv`` with its header row unless it already exists."""
    if os.path.exists('./dataList.csv'):
        return  # never truncate an existing data file
    header = ['city', 'title', 'type', 'address', 'cover',
              'totalComment', 'start', 'avgPrice', 'totalType', 'detailLink']
    with open('dataList.csv', 'w', newline='', encoding='utf-8') as file_obj:
        csv.writer(file_obj).writerow(header)

if __name__ == '__main__':
    # Dianping category ids paired with their display names (index-aligned).
    typelist = ['g112', 'g2714', 'g117', 'g508', 'g114', 'g102', 'g34303', 'g116', 'g104', 'g110']
    tNameList = ['小吃快餐', '水果生鲜', '面包甜点', '烧烤烤串', '韩国料理', '川菜', '烤肉', '西餐', '湘菜', '火锅']
    type_dict = dict(zip(tNameList, typelist))
    print(type_dict)
    url = 'https://www.dianping.com/changsha/ch10/{0}p{1}'
    city = '长沙'
    # Bug fix: init() was defined but never called, so a fresh run produced a
    # CSV without its header row. Create the file (with header) before scraping.
    init()
    for k, v in type_dict.items():
        spiderArticleMain(url, city, v, k, 1, 50)