#!/usr/bin/env python3
# @author : walker
# @date : 2019/9/11

from get_company_urls import *
from config import *
from multiprocessing import Pool
from use_company_data_find_goods import *
# from use_company_url_find_goods import *
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import time
import re
from use_goods_url_find_goods_data import *

# Required for step 3 below (selenium-driven crawling):
# load chromedriver
# browser = webdriver.Chrome()
# explicit wait, 10 seconds
# wait = WebDriverWait(browser,10)

goods_location = 0


'''
    先获取到所有厂商的URL地址，获取完之后注释掉下面两行代码
    param:
        ALL_COMPANY_URL : 爬取到所有模型厂商所需要的url页面
    return:
        获取到所有模型厂商的商品详情地址、名字、产量并存入mongod
'''
def get_and_save_company_urls():
    company_urls = Get_company_urls()
    company_urls.parse_company_page(ALL_COMPANY_URL)


'''
    根据上面所得的每个厂商的商品详细地址，获取到每个公司的手办信息并存入mongod
    pram:
        company_data : 每个厂商商品的详细地址和厂商名
    return:
        将company_data对应的所有prototype goods存入mongod
'''
def use_company_url_get_and_save_prototype_data(company_data):
    use_company_data_find_goods = Use_company_data_find_goods()
    use_company_data_find_goods.parse_company_page(company_data)

'''
    通过selenium模拟点击company的url去获取到每个厂商共存在有多少页数据
    param:
        company_url : 每个厂商的详细url地址
    return:
        返回页面数目
'''
def use_company_url_find_goods():
    try:
        html = browser.page_source
        soup = BeautifulSoup(html,'lxml')
        num_data = soup.findAll(name = "a",attrs={"class": "nums"})
        l = []
        for num_item in num_data:
            #判断是否为数字，如果是则加入list
            if num_item.text.isnumeric():
                l.append(int(num_item.text))
        print(l)
        # total = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"#searchPageStyle > div > a.end.num.nums")))
        return max(l)
    except TimeoutException:
        print("~~~~~~~~~~~~~~~~~~~~~~~~~")
        print("加载某一个厂商的页面失败！")
        print("~~~~~~~~~~~~~~~~~~~~~~~~~")
        return use_company_url_find_goods()

'''
    使用selenium完成翻页功能
'''
def use_page_number_get_goods_data(current_number):
    try:
        print("current_number is " , current_number)
        parse_page()
        click_page = "#searchPageStyle > div > a:nth-child(" + str(current_number) + ")"
        print(click_page)
        # if soup.find(name = "div",attrs={"class" : "col-sm-6 col-md-3"}):
        submit = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,"#searchPageStyle > div > a.next.num.nums")))

        submit.click()
        time.sleep(2)
        number = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"#searchPageStyle > div > span")))
        print(number.text)

    except TimeoutException:
        print("第%d页出了问题！" % current_number)
        use_page_number_get_goods_data(current_number)

'''
    解析当前页面的信息并保存每个手办的地址进mongod
'''
def parse_page():
    try:
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"#toysearchResult")))
        html = browser.page_source
        soup = BeautifulSoup(html,'lxml')
        # print(html)
        prototype_data = soup.findAll(name = "a",attrs={"class": "thumbnail"})
        for url_item in prototype_data:
            # print(url_item.get("href"))
            db_prototype_dataset_info['goods_url_info'].save({"url":url_item.get("href")})
        return soup
    except TimeoutException:
        print("解析goods page出错！")
        return parse_page()

'''
    通过每个公司的地址去爬取该公司的数据
    pram:
        locals: 记录到第几个公司了
        company_data: 每个公司对应的详细信息
    return:
        将每个公司对应的所有手办信息的地址都保存到mongod中
'''
def spider_by_each_company_url(locals,company_data):
    try:
        company_url = company_data.get("company_goods_url")
        print("~~~~~~~~~~~~~~~~~~",company_url)
        browser.get(URL + company_url)
        total_page_number = use_company_url_find_goods()
        for i in range(2,int(total_page_number)+1):
            use_page_number_get_goods_data(i)
        # browser.close()

    except:
        # demo(locals + 1,company_data)
        print("%s公司的商品已经爬取完毕" % locals)

'''
    定义多线程
    param:
        goods_url: 手办详细地址
    return:
        goods_url: 手办详细地址
        img_url_list: 存储失败的图片地址
        failed_sign: 失败标签，成功为0，失败为1
'''
def pool_get_goods_data(goods_url):
    get_goods_data = use_url_get_goods_data()
    img_url_list,failed_sign = get_goods_data.save_prototype_info_and_img_data(goods_url.get('url'),1)

    #将获取失败的图片存入mongod中
    for failed_img_url in img_url_list:
        db_prototype_dataset_info['failed_img_url_data'].save({"goods_url":goods_url,"failed_url":failed_img_url})

    #返回是否失败的信号，0是成功，1是失败
    return failed_sign


def main():
    # 第一步,获取所有company的url、name、numbers
    # get_and_save_company_urls()

    # 第二步，根据第一步所得数据，开启多线程爬取每个公司对应的网页，获取到每个公司对应的手办数据url
    # 这一步放在第三步处理了
    # company_data_info = db_prototype_dataset_info['prototype_company_data_info'].find({}).batch_size(1)
    # company_data = list(company_data_info)
    # company_data = company_data[2:]
    #
    # #第三步，使用seleinium模拟点击浏览器
    # locals = 1
    # for company_item in company_data:
    #     spider_by_each_company_url(locals,company_item)
    #     locals += 1

    #第四步，根据goods_url找到对应手办的详细信息
    #获取到手办url
    get_goods_data = use_url_get_goods_data()
    goods_url_list = list(db_prototype_dataset_info['goods_url_info'].find({}).skip(15000).limit(5000))

    # 多线程获取手办数据
    pool = Pool(processes = 10)
    failed_goods_number_list = pool.map(pool_get_goods_data,goods_url_list)
    failed_goods_number =  sum(failed_goods_number_list)
    print("此次获取手办信息失败的个数为:",failed_goods_number)


    # #单线程获取手办数据
    # i = 0
    # #定义记录失败的个数
    # failed_goods_number = 0
    # for goods_url in goods_url_list:
    #     print("iiiiiiiiiiiiiiiiiiiii-------------",i)
    #     #获取到goods_url对应的手办信息和手办图片地址
    #     img_url_list,failed_sign = get_goods_data.save_prototype_info_and_img_data(goods_url.get('url'),i)
    #     i += 1
    #     for failed_img_url in img_url_list:
    #         db_prototype_dataset_info['failed_img_url_data'].save({"goods_url":goods_url,"failed_url":failed_img_url})
    #     failed_goods_number += failed_sign
    # print("此次获取手办信息失败的个数为:",failed_goods_number)

if __name__ == '__main__':
    main()
