# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from lxml import etree
from fake_useragent import UserAgent
import os
import csv
import re
import pymysql
import urllib.request


def FileSave(save_path, filename, results):
    """Append each item in *results* as one line to <save_path>/<filename>.txt.

    Args:
        save_path: output directory (created if missing, including parents).
        filename:  base name of the text file, without extension.
        results:   iterable of values; each is written as its own line.
    """
    # makedirs creates intermediate directories too (os.mkdir would fail on a
    # nested path) and exist_ok avoids a check-then-create race.
    os.makedirs(save_path, exist_ok=True)
    path = os.path.join(save_path, filename + ".txt")
    with open(path, 'a+', encoding='utf-8') as fp:
        fp.writelines("%s\n" % item for item in results)


def write_to_file(save_path, filename, results):
    """Append scraped rows to <save_path>/<filename>.csv.

    Args:
        save_path: output directory (created if missing, including parents).
        filename:  base name of the CSV file, without extension.
        results:   iterable of dicts with keys name/price/url/num/shop.
    """
    os.makedirs(save_path, exist_ok=True)
    path = os.path.join(save_path, filename + '.csv')
    # Write the header only when the file is new or empty; the original code
    # emitted it on every call, injecting duplicate header rows into the data.
    need_header = not os.path.exists(path) or os.path.getsize(path) == 0
    fieldnames = ['name', 'price', 'url', 'num', 'shop']
    with open(path, 'a+', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if need_header:
            writer.writeheader()
        try:
            writer.writerows(results)
        except ValueError:
            # A row with keys outside `fieldnames`: skip the batch rather than
            # crash mid-scrape (keeps the original best-effort intent, but no
            # longer swallows unrelated errors with a bare except).
            pass


def write_to_mysql(name, price, url, num, shop):
    """Insert one scraped record into the `tb_computer` table of db `student`.

    Args:
        name, price, url, num, shop: field values for the new row.

    On any database error the transaction is rolled back; the connection is
    always closed.
    """
    # Keyword arguments: positional connect() args were removed in pymysql 1.0.
    db = pymysql.connect(host="localhost", user="root",
                         password="5201314", database="student")
    # Parameterized query: the driver escapes the values, preventing SQL
    # injection and breakage when scraped titles contain quotes.
    sql = ("INSERT INTO tb_computer(name, price, url, num, shop) "
           "VALUES (%s, %s, %s, %s, %s)")
    try:
        with db.cursor() as cursor:
            cursor.execute(sql, (name, price, url, num, shop))
        db.commit()
    except Exception:
        # Roll back the failed transaction instead of leaving it open.
        db.rollback()
    finally:
        # Close even when execute/commit raises (original leaked on error paths
        # only by luck of the broad except).
        db.close()

def save_imgs(save_path, filename, imgs):
    """Download product images into the directory <save_path>\\<filename>.

    Args:
        save_path: parent output directory.
        filename:  subdirectory name (created if missing, including parents).
        imgs:      iterable of dicts with keys 'name' (title) and 'img' (URL).
    """
    path = os.path.join(save_path, filename)
    os.makedirs(path, exist_ok=True)
    for item in imgs:
        # Replace every non-word character so the product title is a legal
        # filename (raw string: '\W' as a plain literal is a deprecated escape).
        safe_name = re.sub(r'\W', '_', item['name'])
        # Fetch the image URL and save it locally as <title>.jpg.
        urllib.request.urlretrieve(item['img'], os.path.join(path, safe_name + '.jpg'))


def Page_Level(save_path, filename, myPage, choose, choose1):  # first-level page: extract item fields and dispatch to the chosen storage backend
    """Parse a listing page and store items per `choose` (1=txt, 2=csv, 3=mysql);
    when `choose1` == 1, also download the product images."""
    dom = etree.HTML(myPage)
    base = '//*[@id="ItemWrapper"]/div/a/'
    names = dom.xpath(base + 'div[2]/span/@title')                  # product title
    prices = dom.xpath(base + 'div[2]/p[1]/span[1]/strong/text()')  # price
    nums = dom.xpath(base + 'div[2]/p[2]/span[2]/text()')           # number of buyers
    shops = dom.xpath(base + 'div[2]/p[2]/span[1]/text()')          # shop name
    links = dom.xpath(base + '@href')                               # item link
    pics = dom.xpath(base + 'div[1]/span/img/@src')                 # image url

    rows = []    # dict rows destined for the CSV writer
    lines = []   # tab-joined lines destined for the TXT writer
    imgs = []    # {'name', 'img'} entries destined for image download

    for name, price, num, shop, url, img in zip(names, prices, nums, shops, links, pics):
        if choose1 == 1:
            imgs.append({'name': name, 'img': 'https:' + img})
        if choose == 3:
            # MySQL storage is row-at-a-time, so dispatch inside the loop.
            write_to_mysql(name, price, url, num, shop)
        elif choose == 1:
            lines.append("\t".join((name, price, url, num, shop)))
        else:
            rows.append({'name': name, 'price': price, 'url': url,
                         'num': num, 'shop': shop})

    if choose == 2:
        write_to_file(save_path, filename, rows)
    elif choose == 1:
        FileSave(save_path, filename, lines)
    if choose1 == 1:
        save_imgs(save_path, filename, imgs)

def spider(myPage, choose, choose1, title):
    """Scrape one page of HTML; results are stored under the fixed output
    directory using *title* as the file/subdirectory name."""
    Page_Level('H:\\新建文件夹\\京东', title, myPage, choose, choose1)


if __name__ == "__main__":
    # Interactive configuration: target URL, output name, page count,
    # storage backend (1=txt, 2=csv, 3=mysql) and whether to save images.
    url = input("请输入网址")
    title = input("该网址下载的内容是啥：")
    page_count = int(input("请输入您想要爬取的页数:"))
    print("\n请选择您希望的文件存储方式：\n")
    choose = int(input("1.txt文档  2.CSV文件   3.写入MySQL数据库"))
    print("\n请选择您是否希望保存相应图片\n")
    choose1 = int(input("1.是    2.否"))

    driver = webdriver.Firefox()
    driver.get(url)
    try:
        # Distinct loop variables: the original reused `i` for both loops,
        # shadowing the page counter.
        for _page in range(page_count):
            # Send 120 Arrow-Down keys to the body so lazy-loaded images get a
            # real src attribute before the page source is captured.
            for _ in range(120):
                driver.find_element_by_xpath('//*[@id="body1240"]').send_keys(Keys.ARROW_DOWN)
            spider(driver.page_source, choose, choose1, title)
            # The xpath selects the sibling of the current-page marker, i.e.
            # the "next page" link.
            driver.find_element_by_xpath(
                '//*[@id="J_waterfallPagination"]/div/div/span[@class="page-cur"]/following-sibling::a[1]').click()
    finally:
        # Always close the browser, even if a page fails mid-scrape (the
        # original leaked the Firefox process on any exception). The unused
        # per-iteration `headers`/UserAgent dead code was removed.
        driver.quit()