# -*- coding: utf-8 -*-
import requests  # 导入requests
from bs4 import BeautifulSoup  # 从bs4中导入BeautifulSoup
# from lxml.html import etree
import random  # 取随机数
import time
from db.mysql_conn import MysqlUtils, config, sqlStr
mysql = MysqlUtils(config)
from config.conf import user_agent_list, proxiesDEF, user
import re
# Second-level category page URLs and their display names (parallel lists,
# index-aligned: second_namelist[i] is the name for second_list[i]).
second_list = []
second_namelist = []
# Third-level category page URLs and names (parallel lists; cleared per branch).
third_list = []
third_namelist = []
# Fourth-level category page URLs and names (parallel lists; cleared per branch).
fourth_list = []
fourth_namelist = []


# 页面的跳转，及商品asin的获取，及 ranking的获取
# Fetch a listing page, optionally harvest its sub-category links, and always
# scrape the products shown on it.
def jumpUrl(_url, callback, Boo):
    """Fetch *_url* and scrape it.

    :param _url: absolute URL of a Best Sellers listing page.
    :param callback: one of secondList/thirdList/fourthList; receives the
        parsed page so it can record the next navigation level's links.
    :param Boo: when True, invoke *callback* (only the first page of a
        listing carries the category tree; page 2 passes False).
    """
    print(callback)
    url = _url
    try:
        response = requests.get(url, timeout=(3, 7))
    except requests.exceptions.RequestException:
        # One blind retry after a short pause; a second failure propagates
        # to the caller.  (Was a bare except:, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        time.sleep(5)
        response = requests.get(url, timeout=(3, 7))
    response_soup = BeautifulSoup(response.text, 'html.parser')
    if Boo:
        # Record the sub-category links for the next level of the walk.
        callback(response_soup)
    # Scrape every product item visible on this page.
    getGoodsDeDetailOne(response_soup)


def firstList():
    """Walk the Best Sellers category tree (levels 1-4), scraping each page.

    Each listing is fetched twice: the first page (which, via the callback,
    also harvests the next level's links) and page 2 of the same listing
    (callback suppressed with Boo=False).  NOTE(review): the per-level lists
    are module globals; third/fourth lists are cleared after each branch,
    but second_list never is — acceptable for a single run from __main__.
    """
    # Level-1 (root) category page.
    url = 'https://www.amazon.com/Best-Sellers-Health-Personal-Care/zgbs/hpc/ref=zg_bs_nav_0'
    print('firstUrl=======================================================')
    # Fetch page 1 (harvesting level-2 links) and page 2 of the root listing.
    jumpUrl(url, secondList, True)
    jumpUrl(url.split('ref=')[0]+'ref=zg_bs_pg_2?_encoding=UTF8&pg=2', secondList, False)
    for index, secondUrl in enumerate(second_list):
        print('secondUrl=     '+ str(index) +'=    ' + second_namelist[index] + '=====================================================')
        jumpUrl(secondUrl, thirdList, True)
        jumpUrl(secondUrl.split('ref=')[0]+'ref=zg_bs_pg_2?_encoding=UTF8&pg=2', thirdList, False)
        for ind, thirdUrl in enumerate(third_list):
            print('thirdUrl==  '+ str(ind) +'=    '+ third_namelist[ind] +'=|||||||||||||==secondUrl=   ' + str(index) + '=  ' + second_namelist[index])
            jumpUrl(thirdUrl, fourthList, True)
            jumpUrl(thirdUrl.split('ref=')[0] + 'ref=zg_bs_pg_2?_encoding=UTF8&pg=2', fourthList, False)
            print(len(fourth_list))
            for i, fourthUrl in enumerate(fourth_list):
                print('fourthUrl== '+ str(i) +'=   '+ fourth_namelist[i] + '     =|||||||=thirdUrl==    ' + str(ind) +'== =='+ third_namelist[ind]+ '       =|||||||secondUrl= '+ str(index) +'==' + second_namelist[index])
                # Boo=False here, so the fourthList callback is never invoked
                # on level-4 pages — only the products are scraped.
                jumpUrl(fourthUrl, fourthList, False)
                jumpUrl(fourthUrl.split('ref=')[0] + 'ref=zg_bs_pg_2?_encoding=UTF8&pg=2', fourthList, False)
            # Reset level-4 lists before the next level-3 branch.
            del fourth_list[:]
            print(len(fourth_list))
            del fourth_namelist[:]
            print(len(fourth_namelist))
        # Reset level-3 lists before the next level-2 branch.
        del third_list[:]
        del third_namelist[:]
    print('=====================================跑完了===============================================================')


def secondList(response_soup):
    """Record second-level category links and names from a level-1 page."""
    # Anchors one <ul> deep under the browse-tree root.
    for anchor in response_soup.select('#zg_browseRoot > ul > ul > li > a'):
        second_list.append(anchor.attrs['href'])
        second_namelist.append(anchor.string)


def thirdList(response_soup):
    """Record third-level category links and names (e.g. baby & child care)."""
    # Anchors two <ul> deep under the browse-tree root.
    for anchor in response_soup.select('#zg_browseRoot > ul > ul > ul > li > a'):
        third_list.append(anchor.attrs['href'])
        third_namelist.append(anchor.string)


def fourthList(response_soup):
    """Record fourth-level category links and names from a level-3 page."""
    # Anchors three <ul> deep under the browse-tree root.
    for anchor in response_soup.select('#zg_browseRoot > ul > ul > ul > ul > li > a'):
        fourth_list.append(anchor.attrs['href'])
        fourth_namelist.append(anchor.string)


def getGoodsDeDetailOne(response_soup):
    """Scrape every best-seller item on a listing page and insert a row each.

    Every field is extracted best-effort: a missing or unparseable field
    falls back to a neutral default ('' or 0) instead of aborting the page.
    The formerly bare ``except:`` clauses are narrowed to the exceptions a
    failed BeautifulSoup lookup can actually raise, so genuine bugs (typos,
    interrupts) are no longer silently swallowed.
    """
    print('getGoodsDeDetailOne')
    # One <li class="zg-item-immersion"> per product on the listing page.
    result_list = response_soup.find_all(name='li', attrs={"class": "zg-item-immersion"})
    for li in result_list:
        try:
            url = 'https://www.amazon.com' + li.find('a').attrs['href']
        except (AttributeError, KeyError, TypeError):
            url = ''
        try:
            # ASIN sits between 'dp/' and '?_encoding' in the product URL.
            asin = url.split('dp/')[1].split('?_encoding')[0]
        except (IndexError, AttributeError):
            asin = ''
        try:
            name = li.find('a').find(name='div', attrs={'aria-hidden': 'true'}).string.strip()
        except (AttributeError, TypeError):
            name = ''
        try:
            img = li.find('img').attrs['src']
        except (AttributeError, KeyError, TypeError):
            img = ''
        try:
            # e.g. "4.5 out of 5 stars" -> "4.5"
            star = li.find(name='span', attrs={'class': 'a-icon-alt'}).string
            star = star.split(' out')[0]
        except (AttributeError, IndexError, TypeError):
            star = '0'
        try:
            # Third anchor holds the review count, e.g. "1,234".
            reviews = list(li.select('a'))[2].string.replace(',', '')
        except (AttributeError, IndexError, TypeError):
            reviews = '0'
        try:
            # e.g. "$19.99" -> moneytype '$', price '19.99'
            pricestr = li.find('span', 'p13n-sc-price').string
            moneytype = pricestr[0:1]
            price = pricestr.replace(moneytype, '')
        except (AttributeError, TypeError):
            # Kept as int 0 (not '0') to preserve the original defaults.
            price = 0
            moneytype = ''

        goods = {
            'country': user['country'],
            'asin': asin,
            'rankingtype': '',  # overall-category type (filled in stage two)
            'ranking': '',  # overall-category rank (filled in stage two)
            'star': star,
            'reviews': reviews,
            'moneytype': moneytype,
            'price': price,
            'url': url,
            'name': name,
            'img': img,
        }
        print(goods['asin'])
        sqlParam = [goods['country'], goods['asin'], '', 0, goods['star'], goods['reviews'],
                    goods['moneytype'], goods['price'], goods['url'], goods['name'], goods['img']]
        mysql.insert(sqlStr, sqlParam)


# The sales-rank line occurs in a couple of HTML variants; compiled once,
# tried in order, first match wins.  Raw strings: '\(' in a plain string is
# an invalid escape (DeprecationWarning, future SyntaxError).
_RANK_PATTERNS = (
    re.compile(r'#(.*)\(<a href="/gp/bests'),
    re.compile(r"<span>#(.*) \(<a href='/gp/b"),
)


def getGoodsDeDetailTwo(goodsArr):
    """Visit each product's detail page to fill in its overall sales rank.

    :param goodsArr: list of goods dicts from getGoodsDeDetailOne; each
        entry's 'ranking' is updated in place and a row is inserted.
    Any per-item failure (request, parse, insert) is logged and skipped so
    one bad item never aborts the batch.
    """
    print('getGoodsDeDetailTwo')
    proxies_list = proxiesDEF()
    for index, goods in enumerate(goodsArr):
        url = 'https://www.amazon.com/dp/' + goods['asin']
        # random.choice adapts to the actual list sizes (the old
        # randint(0, 17) hard-coded an 18-entry user-agent list and raised
        # IndexError whenever the list was shorter).
        proxies = {'http': random.choice(proxies_list)}
        headers = {
            'User-Agent': random.choice(user_agent_list),
            'Referer': 'https://www.amazon.com/AmazonBasics-Performance-Alkaline-Batteries-Count/dp/B00MNV8E0C/ref=zg_bs_hpc_1?_encoding=UTF8&refRID=8FREZHENHY56J5CVNYYG&th=1'
        }
        try:
            print(proxies)
            response = requests.get(url, headers=headers, proxies=proxies, timeout=(3, 7))
            # Decode once; the old code re-decoded before every pattern try.
            htmlstr = response.content.decode('utf-8')
            ranking = 0
            rankingtype = ''
            for pattern in _RANK_PATTERNS:
                try:
                    # e.g. "#1,234 in Health & Household"
                    rankarr = pattern.findall(htmlstr)[0].split(' in ')
                    ranking = rankarr[0].replace(',', '')
                    rankingtype = rankarr[1]
                    break
                except IndexError:
                    continue
            if ranking == 0:
                print('=======没有采集到数据')

            goodsArr[index]['ranking'] = ranking
            print(goods['asin'] + '    ' + str(goodsArr[index]['ranking']))
            sqlParam = [goods['country'], goods['asin'], rankingtype, ranking, goods['star'], goods['reviews'],
                        goods['moneytype'], goods['price'], goods['url'], goods['name'], goods['img']]
            mysql.insert(sqlStr, sqlParam)
        except Exception:
            # Deliberately broad per-item boundary: skip to the next product.
            print('get 请求失败 跳往下一个')



# Entry point: crawl the full Best Sellers category tree from the root page.
if __name__ == '__main__':
    firstList()




