# -*- coding: utf-8 -*-
import json
import random
import time
import pandas as pd

import undetected_chromedriver as uc
from selenium.webdriver.common.by import By


def login_for_cookies(url):
    """Open *url* in a browser, wait for a manual login, then persist cookies.

    The user logs in interactively; once they press Enter, the session's
    cookies are dumped to ``cookies.json`` for later reuse by get_cookies().

    :param url: page to open for the interactive login.
    """
    driver = uc.Chrome()
    try:
        driver.get(url)
        input("登陆成功后回车以继续")  # wait until the user has finished logging in
        cookies = driver.get_cookies()  # full cookie list for the logged-in session
    finally:
        driver.quit()  # BUG FIX: the browser was previously never closed (leak)

    # Persist the cookie list as JSON so later runs can skip the manual login.
    with open("cookies.json", "w", encoding="utf-8") as cks:
        json.dump(cookies, cks)


def get_cookies():
    """Load the cookie list previously saved by login_for_cookies()."""
    with open("cookies.json", encoding="utf-8") as saved:
        cookies = json.load(saved)
    return cookies


def getUrl(url):
    """Crawl JD laptop search pages and save product URLs and prices to CSV.

    Opens a browser, restores the saved login cookies, then walks the
    odd-numbered search result pages (JD paginates on odd page indices),
    collecting every product link and its listed price into ``url_csv.csv``.

    :param url: landing page to open first so the cookie domain matches.
    """
    driver = uc.Chrome()
    driver.implicitly_wait(30)
    try:
        driver.get(url)
        time.sleep(3)
        driver.delete_all_cookies()  # drop stale cookies before restoring ours

        # The saved cookies are a list of cookie dicts; add them one by one.
        for cookie in get_cookies():
            driver.add_cookie(cookie)

        url_list = []
        price_list = []
        # BUG FIX: the loop previously overwrote the `url` parameter; use a
        # distinct name for the per-page search URL.
        for page in range(1, 199, 2):
            page_url = f'https://search.jd.com/Search?keyword=%E7%AC%94%E8%AE%B0%E6%9C%AC%E7%94%B5%E8%84%91&psort=1&suggest=1.his.0.0&wq=%E7%AC%94%E8%AE%B0%E6%9C%AC%E7%94%B5%E8%84%91&psort=1&pvid=3f2ec1a9d8934b8096312237c8e91968&isList=0&page={page}&s=61&click=0&log_id=1703553423605.4237'

            driver.get(page_url)
            driver.implicitly_wait(30)
            time.sleep(random.random() * 6)  # random 0-6 s delay to look human

            anchors = driver.find_elements(By.XPATH, './/div[@class="p-name p-name-type-2"]/a')
            prices = driver.find_elements(By.XPATH, './/div/div[@class="p-price"]/strong/i')

            url_list.extend(a.get_attribute('href') for a in anchors)
            price_list.extend(pr.text for pr in prices)
    finally:
        driver.quit()  # BUG FIX: the browser was previously never closed (leak)

    df = pd.DataFrame([url_list, price_list]).T
    print(df)
    df.to_csv('url_csv.csv', header=['url', 'price'])


def getData(df, url='https://www.jd.com/', start=1041, stop=1500):
    """Visit each product URL and append brand/price/comment/product/title rows.

    For every product link in ``df['url'][start:stop]`` the detail page is
    scraped and one comma-separated row is appended to ``JDData.csv``.
    Pages that raise (captcha slider, pre-sale pages, ...) are skipped.

    :param df: DataFrame with a 'url' column (as produced by getUrl).
    :param url: landing page opened first so the saved cookies' domain matches
        (previously read from a module-level global; same default value).
    :param start: first row offset to scrape (resume point; original value).
    :param stop: row offset to stop before (original value).
    """
    driver = uc.Chrome()
    driver.implicitly_wait(15)
    try:
        driver.get(url)
        time.sleep(random.random() * 6)  # random 0-6 s delay to look human
        driver.delete_all_cookies()  # drop stale cookies before restoring ours

        # The saved cookies are a list of cookie dicts; add them one by one.
        for cookie in get_cookies():
            driver.add_cookie(cookie)

        url_list = df['url'].values

        for u, i in zip(url_list[start:stop], list(df.index)[start:stop]):
            try:
                driver.get(u)
                time.sleep(random.random() * 6)  # random 0-6 s delay

                title = driver.find_element(By.XPATH, './/div[@class="sku-name"]').text
                price = driver.find_element(By.XPATH, './/span[@class="p-price"]').text
                brand = driver.find_element(By.XPATH, '//*[@id="parameter-brand"]/li/a').text
                product = driver.find_element(By.XPATH, '//*[@id="detail"]/div[2]/div[1]/div[1]/ul[2]/li[1]').text

                try:
                    # The comment counter block is missing on some pages.
                    comment = driver.find_element(By.XPATH, './/div[@class="comment-count item fl"]').text.replace('\n', '')
                except Exception:
                    comment = 0
            except Exception:
                # Captcha slider or pre-sale page: skip this product entirely.
                print('这一页出现了意外，跳过爬取!')
                continue

            print(f'爬取第{i}页完成!')

            # BUG FIX: the row used to be written even when scraping failed,
            # duplicating the previous product's (or empty) data into the CSV.
            # Appending per-iteration keeps partial results on a crash.
            with open('JDData.csv', 'a+', encoding='utf-8') as f:
                f.write(f'{brand},{price},{comment},{product},{title}' + '\n')
    finally:
        driver.quit()  # BUG FIX: the browser was previously never closed (leak)

    print('数据爬取完毕!')


if __name__ == '__main__':
    # NOTE: getData() as originally written reads this module-level `url`
    # directly (a global), so it must be assigned before getData() is called.
    url = 'https://www.jd.com/'
    # Step 1 (run once): log in manually and save cookies.json.
    # login_for_cookies(url)
    # Step 2: collect product URLs and prices into url_csv.csv.
    # getUrl(url)

    # Step 3: scrape per-product details for the URLs collected in step 2.
    df = pd.read_csv('url_csv.csv')
    getData(df)
