import random
import time
from queue import Queue

import openpyxl
import pymysql
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

# --- Chrome launch configuration ----------------------------------------
# Options controls Chrome's startup flags and profile preferences.
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')             # run with highest privileges (needed in containers/root)
chrome_options.add_argument('--headless')               # headless mode: no visible browser window
chrome_options.add_argument("--proxy-server=http://220.179.210.103:35916")  # route traffic through a proxy
chrome_options.add_argument('--disable-gpu')            # avoid GPU-related crashes in headless mode
chrome_options.add_argument('--disable-dev-shm-usage')  # write shared memory to /tmp instead of the small /dev/shm
# Disable image loading to speed up page capture.
# BUGFIX: the pref key was misspelled "profile.manages_default_content_settings.images",
# which Chrome silently ignores; the correct key is "managed_...".
chrome_options.add_experimental_option(
    "prefs", {"profile.managed_default_content_settings.images": 2})
# Hide the automation switch so the "controlled by automated software"
# infobar is suppressed and simple bot detection is less likely to trigger.
chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])





def Cycle_operation(data_list):
    """Scroll the current page down in steps so lazily-loaded items render,
    then append the full page source to ``data_list``.

    Relies on the module-level ``browser`` WebDriver instance.

    Args:
        data_list: list that collects the HTML source of each page (mutated).
    """
    time.sleep(random.randint(1, 3))  # random pause to look less bot-like
    for step in range(20):
        offset = 700 + 350 * step
        browser.execute_script(
            "var q=document.documentElement.scrollTop=" + str(offset))
        time.sleep(0.5)  # give lazy-loaded content time to appear
    # final jump to a fixed offset near the bottom of the listing
    browser.execute_script('var q=document.documentElement.scrollTop=4200')
    time.sleep(random.randint(1, 3))
    data_list.append(browser.page_source)

def getdata(brand, pages, starturl):
    """Crawl ``pages`` listing pages for one brand, starting at ``starturl``.

    Relies on the module-level ``browser`` WebDriver instance.

    Args:
        brand: brand name, used only for progress logging.
        pages: number of result pages to fetch (str or int; cast with int()).
        starturl: URL of the first result page.

    Returns:
        list[str]: raw HTML source of every page visited, in order.
    """
    data_list = []
    browser.get(starturl)
    browser.maximize_window()
    print(f'正在捕捉 {brand} 的第 1 页')
    Cycle_operation(data_list)
    for i in range(1, int(pages)):
        # BUGFIX: find_element_by_xpath was removed in Selenium 4;
        # find_element(By.XPATH, ...) works on both 3.x and 4.x.
        button = browser.find_element(
            By.XPATH,
            '//*[@id="J_bottomPage"]/span[1]/a[@title="使用方向键右键也可翻到下一页哦！"]')
        button.click()
        print(f'正在捕捉 {brand} 的第 {i+1} 页')
        Cycle_operation(data_list)
    return data_list


def parse(data_list):
    """Extract product records from captured page sources.

    Args:
        data_list: list of raw HTML page sources (from ``getdata``).

    Returns:
        Queue: queue of tuples
        (brand, name, evaluation, price, label, store, details, img).
    """
    html_queue = Queue()
    for html in data_list:
        tree = etree.HTML(html)
        brand = ''.join(tree.xpath('//*[@id="J_crumbsBar"]/div/div/a/em/text()'))
        # renamed from `list` to avoid shadowing the builtin
        goods = tree.xpath('//*[@id="J_goodsList"]/ul/li')
        for li in goods:
            # BUGFIX: the original indexed [0] unconditionally, raising
            # IndexError for <li> elements without a data-spu attribute;
            # check the xpath result before indexing so those are skipped.
            spu = li.xpath('./@data-spu')
            if spu and spu[0]:
                details = 'https:' + ''.join(li.xpath('./div/div[1]/a/@href'))
                img = 'https:' + ''.join(li.xpath('./div/div[1]/a/img/@src'))
                price = ''.join(li.xpath('./div/div[2]/strong/i/text()')) + '元'
                name = ''.join(li.xpath('./div/div[3]/a/em/text()'))
                evaluation = ''.join(li.xpath('./div/div[4]/strong/a/text()')) + '人评价'
                store = ''.join(li.xpath('./div/div[5]/span/a/text()'))
                label = '-'.join(li.xpath('./div/div[6]/i/text()'))
                record = (brand, name, evaluation, price, label, store, details, img)
                html_queue.put(record)
    print(' 解析部分 完成  ')
    return html_queue

def write_in(html_queue):
    """Drain ``html_queue`` and insert every product tuple into JD_DATA.

    Uses a parameterized INSERT (safe against SQL injection), one cursor
    for the whole batch, and a single commit. The cursor and connection
    are always closed, even if an insert fails.

    Args:
        html_queue: Queue of 8-tuples produced by ``parse``.

    Returns:
        None
    """
    conn = pymysql.connect(host='xxxxx',
                           port=3306,
                           user='xxxxx',
                           password='xxxxxx',
                           db='text',
                           charset='utf8')
    sql = 'insert into JD_DATA(brand, name, evaluation, price, label, store, details, img) values(%s,%s,%s,%s,%s,%s,%s,%s)'
    # One cursor and one commit for the batch, instead of creating and
    # closing a cursor (plus a commit) for every single row as before.
    cursor = conn.cursor()
    try:
        while not html_queue.empty():
            row = html_queue.get()  # 8-tuple matches the 8 placeholders
            cursor.execute(sql, row)
        conn.commit()
    finally:
        cursor.close()
        conn.close()
    print('写入数据库  完成')
    return None




if __name__ == '__main__':
    # Single shared driver for the whole run; the worker functions above
    # reference it as a module-level global.
    browser = webdriver.Chrome(executable_path='D:\\python\\chromedriver.exe', options=chrome_options)
    workbook = openpyxl.load_workbook('url_info.xlsx')
    sheet = workbook.active
    # Rows 2-19, columns A-C: brand name, page count, start URL.
    for row in sheet.iter_rows(min_row=2, max_row=19, min_col=1, max_col=3):
        brand, pages, starturl = (cell.value for cell in row)
        data_list = getdata(brand=brand, pages=pages, starturl=starturl)
        write_in(parse(data_list))
    browser.quit()
    print('全部任务完成 ！！！')

