import json
import csv
import sys
import time
import random
import tkinter
from selenium import webdriver
from selenium.webdriver.common.by import By
from threading import Thread
from playsound import playsound
import re
import urllib.request,urllib.error
from bs4 import BeautifulSoup




# Launch the browser
# Ask for the search term first, then bring up Chrome.
keyword = input('输入搜索关键词:')

# Make the session look less like automation and silence the
# noisy DevTools logging switch.
chrome_opts = webdriver.ChromeOptions()
chrome_opts.add_experimental_option('excludeSwitches', ['enable-logging'])
chrome_opts.add_argument("--disable-blink-features=AutomationControlled")
browser = webdriver.Chrome(options=chrome_opts)
browser.get('https://www.taobao.com')

# CSV output setup: one file per run, named after the keyword and timestamp.
FIELDNAMES = ['item_name', 'item_price', 'item_shop', 'shop_link', 'item_link', 'bridge']
stamp = time.strftime("%Y-%m-%d_%H-%M", time.localtime())
csvfile = open(f'{keyword}_taobao_{stamp}.csv', 'a', encoding='utf-8-sig', newline='')
csvWriter = csv.DictWriter(csvfile, fieldnames=FIELDNAMES)
# Human-readable (Chinese) header row.
csvWriter.writerow({
    'item_name': '商品名',
    'item_price': '商品价格',
    'item_shop': '店铺名称',
    'shop_link': '店铺链接',
    'item_link': '商品链接',
    'bridge': '店铺id桥',
})

# Cookie handling
# Clear cookies set by the initial page load, then inject saved login
# cookies so searches run as a logged-in user.
browser.delete_all_cookies()
try:
    # taobao.cookie is expected to be a JSON list of cookie dicts saved
    # by a previous login session.
    with open('taobao.cookie', 'r', encoding='utf-8') as f:
        cookie_list = json.load(f)
    for cookie in cookie_list:
        browser.add_cookie(cookie)
except Exception:
    # Missing/corrupt cookie file or a cookie Chrome rejects: fall through
    # to the manual-login pause below instead of crashing.
    # NOTE(review): original used a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit; narrowed to Exception.
    print('未找到Cookie')
# Refresh so the injected cookies take effect.
browser.refresh()
# Taobao policy changes may invalidate saved cookies; this 10-second pause
# lets the operator log in manually if needed.
time.sleep(10)
# Search and page-count discovery
# Load the first results page for the keyword (s=1 is the result offset).
browser.get(
    f'https://s.taobao.com/search?q={keyword}&commend=all&ssid=s5-e&search_type'
    f'=item&sourceId=tb.index&spm=a21bo.jianhua.201856-taobao-item.2&ie=utf8&initiative_id=tbindexz_2017030'
    f'6&&s=1 ')
browser.implicitly_wait(10)

soup = BeautifulSoup(browser.page_source, 'html.parser')

# Total result pages according to Taobao's pager element.
# BUG FIX: soup.find() returns None when the pager is absent (e.g. a
# captcha/login wall or zero results), which crashed with AttributeError;
# fall back to 1 page instead. Also dropped the debug dump of the full
# page source that was printed to stdout.
page_span = soup.find('span', class_='page-count')
if page_span is None:
    print('未找到页数元素，默认按1页处理')
    taobaoPage = 1
else:
    # assumes the span text is a plain integer — TODO confirm against live page
    taobaoPage = int(page_span.get_text())

# Crawl-range control: report the total and fix the page window to crawl.
print(f'共计{taobaoPage}页,建议每2小时总计爬取不超过20页')
page_start, page_end = 1, 2


# Main crawl loop: one search-results page per iteration.
for page in range(page_start, page_end):
    # Pages remaining after this one is (page_end - 1) - page.
    # BUG FIX: original computed page_end - page_start - page, which is
    # only correct when page_start == 1.
    print(f'当前正在获取第{page}页，还有{page_end - 1 - page}页')
    # s=(page-1)*44: Taobao paginates by result offset, 44 items per page.
    browser.get(
        f'https://s.taobao.com/search?q={keyword}&commend=all&ssid=s5-e&search_type'
        f'=item&sourceId=tb.index&spm=a21bo.jianhua.201856-taobao-item.2&ie=utf8&initiative_id=tbindexz_2017030'
        f'6&&s={(page - 1) * 44} ')
    if browser.title == '验证码拦截':
        # Captcha page detected: play an alert and wait for manual solving.
        print('出错：如有验证请验证。等待20秒')
        playsound('error.wav')
        time.sleep(20)
    time.sleep(5)
    # Scrape the item list for this page.
    try:
        print(f'当前正在获取第{page}页，还有{page_end - 1 - page}页')
        goods_arr = browser.find_elements(By.CSS_SELECTOR, '#mainsrp-itemlist > div > div > div:nth-child(1)>div')
        goods_length = len(goods_arr)
        # Walk every item card found on the page.
        for i, goods in enumerate(goods_arr):
            # BUG FIX: original called pint() — a NameError that the bare
            # `except:` below silently swallowed, so every page was skipped.
            print(f'正在获取第{i}个,共计{goods_length}个')
            item_name = goods.find_element(By.CSS_SELECTOR,
                                           'div.ctx-box.J_MouseEneterLeave.J_IconMoreNew > div.row.row-2.title>a').text
            item_price = goods.find_element(By.CSS_SELECTOR,
                                            'div.ctx-box.J_MouseEneterLeave.J_IconMoreNew > div.row.row-1.g-clearfix > div.price.g_price.g_price-highlight > strong').text
            item_shop = goods.find_element(By.CSS_SELECTOR,
                                           'div.ctx-box.J_MouseEneterLeave.J_IconMoreNew > div.row.row-3.g-clearfix > div.shop > a > span:nth-child(2)').text
            shop_link = goods.find_element(By.CSS_SELECTOR,
                                           'div.ctx-box.J_MouseEneterLeave.J_IconMoreNew > div.row.row-3.g-clearfix > div.shop > a').get_attribute(
                'href')
            item_link = goods.find_element(By.CSS_SELECTOR,
                                           'div.pic-box.J_MouseEneterLeave.J_PicBox > div > div.pic>a').get_attribute(
                'href')
            # The shop link embeds a numeric shop id; extract it when present,
            # otherwise keep the raw link as the "bridge" value.
            try:
                b = shop_link.split('https://store.taobao.com/shop/view_shop.htm?user_number_id=')[1]
            except (IndexError, AttributeError):
                # IndexError: prefix not in the link; AttributeError: href was None.
                b = shop_link
            csvWriter.writerow(
                {'item_name': item_name, 'item_price': item_price, 'item_shop': item_shop, 'shop_link': shop_link,
                 'item_link': item_link, 'bridge': b})
            csvfile.flush()  # persist each row immediately in case of a crash
    except Exception:
        # Any scraping failure (layout change, captcha wall) skips this page.
        # NOTE(review): narrowed from a bare `except:` so Ctrl-C still works.
        print(f'出错：如有验证请验证。等待20秒')
        print(f'注意:第<{page}>页将跳过如需获取请重新运行程序！')
        playsound('error.wav')
        time.sleep(20)

    # Random delay between pages to reduce the chance of being rate-limited.
    delay_time = random.randint(10, 30)
    for delay in range(delay_time):
        print(f'第{page}页，还有{page_end - 1 - page}页')
        # BUG FIX: remaining time is delay_time - delay; the original printed
        # the constant total as the "remaining" seconds.
        print(f'延时翻页：已延时{delay}秒，剩余{delay_time - delay}秒')
        time.sleep(1)

# Wind-down: report progress, persist the CSV, then quit the browser.
for msg in ('程序结束', '程序结束正在保存文件'):
    print(msg)
csvfile.close()
print('保存文件完成，准备退出中')
time.sleep(5)
browser.close()
sys.exit()
