# -*- coding: utf-8 -*-
from selenium import webdriver                                      # FAILED: cannot click the search button to jump to the new page
from lxml import etree
from fake_useragent import UserAgent
import os
import csv
import pymysql


def FileSave(save_path, filename, results):  # Save results as a TXT document
    """Append each item of `results` as one line to <save_path>/<filename>.txt.

    :param save_path: directory to write into (created, including parents, if missing)
    :param filename: file name without the ".txt" extension
    :param results: iterable of printable items, one per line
    """
    # makedirs (not mkdir) so nested paths like "a/b/c" also work; exist_ok
    # avoids the check-then-create race of the original os.path.exists test.
    os.makedirs(save_path, exist_ok=True)
    path = os.path.join(save_path, filename + ".txt")
    # "a+" append mode: repeated calls accumulate lines instead of overwriting.
    with open(path, 'a+', encoding='utf-8') as fp:
        for item in results:
            fp.write("%s\n" % item)


def write_to_file(save_path, results):  # Save results as a CSV file
    """Append `results` (list of dicts) to <save_path>/笔记本电脑统计.csv.

    The header row is written only when the file is new or empty, so repeated
    calls no longer interleave duplicate header lines into the data (the
    original wrote the header on every call).

    :param save_path: directory to write into (created, including parents, if missing)
    :param results: iterable of dicts with keys name/price/url/num/shop
    """
    os.makedirs(save_path, exist_ok=True)
    path = os.path.join(save_path, '笔记本电脑统计.csv')
    fieldnames = ['name', 'price', 'url', 'num', 'shop']
    # Decide before opening in append mode whether a header is needed.
    need_header = not os.path.exists(path) or os.path.getsize(path) == 0
    # newline='' per the csv module docs; explicit utf-8 so the Chinese file
    # content does not depend on the platform's locale encoding.
    with open(path, 'a+', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if need_header:
            writer.writeheader()
        try:
            writer.writerows(results)
        except ValueError:
            # Keep the original best-effort behavior, but only for the row-shape
            # errors DictWriter actually raises — not a bare except.
            pass


def write_to_mysql(name, price, url, num, shop):
    """Insert one product record into the `tb_computer` table of the `test` DB.

    Uses a parameterized query instead of the original str.format-built SQL,
    which broke (and was injectable) whenever a value contained a quote.
    On any MySQL error the transaction is rolled back; the connection is
    always closed.

    :param name: product title
    :param price: price string as scraped
    :param url: product page URL
    :param num: payment-count string as scraped
    :param shop: shop name
    """
    # Keyword arguments — the positional form relied on pymysql's legacy
    # (host, user, password, database) ordering.
    db = pymysql.connect(host="localhost", user="root",
                         password="123456", database="test")
    try:
        # Context manager closes the cursor; %s placeholders let the driver
        # escape the values safely.
        with db.cursor() as cursor:
            sql = ("INSERT INTO tb_computer(name, price, url, num, shop) "
                   "VALUES (%s, %s, %s, %s, %s)")
            cursor.execute(sql, (name, price, url, num, shop))
        db.commit()
    except pymysql.MySQLError:
        # Roll back on any database error (original used a bare except).
        db.rollback()
    finally:
        # Always release the connection, even if commit/rollback raised.
        db.close()


def Page_Level(save_path, filename, myPage, choose):  # First-level page: extract product fields and dispatch
    """Parse one Taobao search-result page and store the records.

    :param save_path: output directory for TXT/CSV modes
    :param filename: base file name for TXT mode
    :param myPage: raw HTML of the result page
    :param choose: storage mode — 1 = TXT lines, 2 = CSV rows, 3 = MySQL
    """
    dom = etree.HTML(myPage)
    titles = dom.xpath('//*[@id="ItemWrapper"]/div/a/div[2]/span/@title')  # product title
    prices = dom.xpath('//*[@id="ItemWrapper"]/div/a/div[2]/p[1]/span[1]/strong/text()')  # price
    pay_counts = dom.xpath('//*[@id="ItemWrapper"]/div/a/div[2]/p[2]/span[2]/text()')  # number of buyers
    shops = dom.xpath('//*[@id="ItemWrapper"]/div/a/div[2]/p[2]/span[1]/text()')  # shop name
    links = dom.xpath('//*[@id="ItemWrapper"]/div/a/@href')  # product URL

    txt_lines = []
    csv_rows = []
    for name, price, num, shop, url in zip(titles, prices, pay_counts, shops, links):
        if choose == 3:
            # Insert each record directly into MySQL.
            write_to_mysql(name, price, url, num, shop)
        elif choose == 1:
            # One tab-separated line per product for the TXT output.
            txt_lines.append("\t".join((name, price, url, num, shop)))
        else:
            # Collect dict rows for the CSV writer.
            csv_rows.append({'name': name, 'price': price, 'url': url,
                             'num': num, 'shop': shop})

    if choose == 2:
        write_to_file(save_path, csv_rows)
    elif choose == 1:
        FileSave(save_path, filename, txt_lines)

def spider(myPage, choose):
    """Scrape one result page into the fixed output location.

    :param myPage: raw HTML of a search-result page
    :param choose: storage mode forwarded to Page_Level (1/2/3)
    """
    Page_Level('E:\\百度文库下载',  # output directory
               u"笔记本电脑统计",  # base file name
               myPage, choose)


if __name__ == "__main__":

    # Taobao search portal; `keyword` in the query string is the URL-encoded "笔记本电脑" (laptop).
    base_url = 'https://uland.taobao.com/sem/tbsearch?refpid=mm_26632258_3504122_32538762&clk1=e4453a161fce889e813ff3235&keyword=%E7%AC%94%E8%AE%B0%E6%9C%AC%E7%94%B5%E8%84%91&page=0'
    headers = {
        "User-Agent": UserAgent().random  # NOTE(review): built but never used — Selenium does not take these headers
    }
    text = input("请输入您想要爬取的电脑相关信息:")  # search keyword typed by the user
    t = int(input("请输入您想要爬取的页数:"))  # number of result pages to scrape
    print("\n请选择您希望的文件存储方式：\n")
    choose = int(input("1.txt文档  2.CSV文件   3.写入MySQL数据库"))  # output mode: 1=TXT, 2=CSV, 3=MySQL

    # NOTE(review): PhantomJS support was removed from Selenium (deprecated in 3.8,
    # gone in 4.x) — a headless Chrome/Firefox driver is the modern replacement.
    driver = webdriver.PhantomJS()
    driver.get(base_url)
    # Fill the search box (id="q") and click the submit button.
    # NOTE(review): find_element_by_* methods were removed in Selenium 4;
    # this code requires a Selenium 3.x install.
    driver.find_element_by_id('q').clear()
    driver.find_element_by_id('q').send_keys(text)
    driver.find_element_by_xpath('//*[@id="J_searchForm"]/input[4]').click()

    now_handles = driver.current_window_handle
    print(now_handles)  # print the current window handle
    handles = (driver.window_handles)  # list of all window handles
    print(handles)  # print the handle list
    # NOTE(review): `handles` is captured once here; if the site does not open a
    # second window, handles[1] below raises IndexError — the header comment on
    # the import line says this click/jump already fails.

    for i in range(t):
        # Click the "next page" pagination link, then read the new page's HTML.
        driver.find_element_by_xpath('//*[@id="J_waterfallPagination"]/div/div/a[4]').click()
        print('switch to ', handles[1])
        driver.switch_to_window(handles[1])  # NOTE(review): deprecated; switch_to.window in current Selenium
        html = driver.page_source
        spider(html, choose)

    driver.close()  # close the current window
    driver.switch_to_window(handles[0])  # switch back to the original window

    driver.quit()




