# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from lxml import etree
from fake_useragent import UserAgent
import os
import csv
import re
import pymysql
import urllib.request

# Global configuration
save_path = ' '  # output directory for every save helper below (placeholder — set a real path before use)
headers = {
    # random desktop User-Agent string, picked once at import time
    "User-Agent": UserAgent().random
}


# Save results to a TXT document (one entry per line, appended).
def FileSave(filename, results):
    """Append each item of *results* as a line to <save_path>/<filename>.txt.

    Creates the save directory (and any missing parents) if it does not
    already exist.
    """
    # makedirs(exist_ok=True) replaces the exists()+mkdir pair: it also
    # creates missing parent directories and has no check-then-create race.
    os.makedirs(save_path, exist_ok=True)
    path = save_path + os.path.sep + filename + ".txt"
    with open(path, 'a+', encoding='utf-8') as fp:
        for item in results:
            fp.write("%s\n" % item)


# Save results to a CSV file (appended; header row written only once).
def write_to_file(filename, results, fieldnames):
    """Append *results* (an iterable of dicts) to <save_path>/<filename>.csv.

    The header row is written only when the file is new or empty, so
    repeated calls in append mode no longer scatter duplicate header rows
    through the data.
    """
    os.makedirs(save_path, exist_ok=True)  # also creates missing parents, no race
    path = save_path + os.path.sep + filename + '.csv'
    # Decide whether a header is needed BEFORE opening in append mode.
    need_header = not os.path.exists(path) or os.path.getsize(path) == 0
    with open(path, 'a+', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if need_header:
            writer.writeheader()
        try:
            writer.writerows(results)
        except ValueError:
            # DictWriter raises ValueError on rows with unexpected keys;
            # keep the original best-effort behavior (skip bad rows) but
            # stop swallowing every other kind of error.
            pass


# Execute a SQL statement against a local MySQL database.
def save_db(password, database, table, sql_instruct):
    """Drop *table* if it exists, then run *sql_instruct*; commit on
    success, roll back on failure.

    Connects as root@localhost. The connection is always closed, even
    when execution raises.
    """
    # Positional arguments to pymysql.connect() were removed in PyMySQL
    # 1.0 — pass them by keyword.
    db = pymysql.connect(host="localhost", user="root",
                         password=password, database=database)
    try:
        with db.cursor() as cursor:  # cursor is closed automatically
            # NOTE(review): *table* is interpolated directly into SQL —
            # only pass trusted table names (identifiers cannot be
            # parameterized). Dropping the table also destroys existing
            # data on every call; confirm that is intended.
            cursor.execute("DROP TABLE IF EXISTS {}".format(table))
            try:
                cursor.execute(sql_instruct)
                db.commit()
            except pymysql.MySQLError:
                # Roll back on database errors only; programming errors
                # (e.g. TypeError) still propagate instead of being hidden.
                db.rollback()
    finally:
        db.close()  # guaranteed even if DROP/execute raises


# Build the SQL statement to run for *table* (template — fill in the blanks).
def build_sql(table):
    """Return the SQL statement for *table*.

    Currently returns only the CREATE TABLE template; the other templates
    are placeholders kept for reference when customizing this scraper.
    """
    # Create table
    sql_create = "CREATE TABLE {} (     )".format(table)

    # Insert (fixed: an INSERT statement takes no WHERE clause)
    sql_insert = "INSERT INTO {}(     ) VALUES ()".format(table)

    # Delete
    sql_delete = "DELETE FROM {} WHERE      ".format(table)

    # Update
    sql_update = "UPDATE {} SET   WHERE   ".format(table)

    # Alter
    sql_alter = "ALTER TABLE {}      ".format(table)

    # Select
    sql_select = "SELECT {} ".format(table)

    return sql_create


# Download images given as {'name': ..., 'img': url} mappings.
def save_imgs(filename, img_name_urls):
    """Download every image in *img_name_urls* into <save_path>/<filename>/.

    Each entry must be a mapping with keys 'name' (basis of the file name)
    and 'img' (the image URL).
    """
    path = save_path + os.path.sep + filename
    os.makedirs(path, exist_ok=True)  # also creates missing parents, no race
    for entry in img_name_urls:
        # Sanitize: replace every non-word character so the name is a
        # legal file name. r'\W' — raw string avoids Python's
        # invalid-escape-sequence warning on '\W'.
        name = re.sub(r'\W', '_', entry['name'])
        path_name = path + os.path.sep + name + '.jpg'
        # Fetch the image URL and store the bytes locally.
        urllib.request.urlretrieve(entry['img'], path_name)


# First-level page scrape.
def Page_Level(filename, myPage):  # level-one page
    """Parse one listing page and return the extracted records.

    The XPath expressions are placeholders to be filled in for the
    target site; until then the returned list is empty.
    """
    tree = etree.HTML(myPage)
    collected = []
    first_fields = tree.xpath(' ')
    second_fields = tree.xpath(' ')

    for first, second in zip(first_fields, second_fields):
        pass  # placeholder: build a record from (first, second)
    return collected


# Crawler entry point for a single page of source HTML.
def spider(myPage, title):
    """Scrape *myPage*; *title* doubles as the output file name."""
    results = Page_Level(title, myPage)  # crawl; title used as filename
    # Pick a save method here (FileSave / write_to_file / save_imgs).
    # 选择保存方式


# Entry point: drive a browser through *t* pages and scrape each one.
if __name__ == "__main__":

    url = input("请输入网址")
    title = input("该网址下载的内容是啥：")
    t = int(input("请输入您想要爬取的页数:"))

    driver = webdriver.Firefox()
    try:
        driver.get(url)
        for i in range(t):
            # Scroll to the bottom so lazy-loaded content renders.
            # Fixed: `document` has no scrollHeight property — it lives on
            # document.body, so the original call never scrolled.
            driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
            myPage = driver.page_source  # grab the rendered HTML
            spider(myPage, title)
            # XPath placeholder: should locate the "next page" control
            driver.find_element_by_xpath(' ').click()
    finally:
        driver.quit()  # always release the browser, even after an error
