# -*- coding: utf-8 -*-
# Amazon books category crawler: walks the category tree and stores listings in MySQL.
import requests  # HTTP client for fetching listing pages
from lxml import etree
import random  # randomized pauses, proxy / User-Agent / Referer selection
import time
from db.mysql_conn import MysqlUtils, config, sqlStr
mysql = MysqlUtils(config)  # shared MySQL connection used by inserSql()
from config.conf import proxiesDEF, user, Referer_list
from config.useragent import user_agent_list
# Second-level category accumulators (parallel lists: URL and display name).
second_list = []
second_namelist = []
# Third-level category accumulators (parallel lists: URL and display name).
third_list = []
third_namelist = []
# Fallback HTTP proxy pool; getGoodsList() may replace it at runtime via proxiesDEF().
proxies_list = ['125.46.0.62:53281', '124.11.192.216:80', '124.11.210.123:80','163.125.223.87:8118', '163.125.223.10:8118', '60.2.44.182:52143','222.223.182.66:8000', '175.42.68.234:9999', '114.101.45.138:65309','114.25.152.52:80', '113.108.242.36:47713']
def firstList():
    """Entry point: walk the Amazon books category tree and scrape listings.

    Fetches the first-level page, then for each second-level category fetches
    its third-level categories and crawls the goods pages of each, pausing
    randomly between requests to reduce the chance of being throttled.
    """
    # First-level (books) category page.
    url = 'https://www.amazon.com/s?i=stripbooks&bbn=465600&rh=n%3A283155%2Cn%3A%212349030011%2Cn%3A465600&dc&qid=1563756721&ref=sr_ex_n_1'
    print('firstUrl=======================================================')
    # Fetch the root page; secondList() fills second_list / second_namelist.
    jumpUrl(url, secondList, True, False)
    for index, secondUrl in enumerate(second_list):
        print('secondUrl=     ' + str(index) + '=    ' + second_namelist[index] + '=====================================================')
        # NOTE(review): only categories with index > 10 are crawled — this looks
        # like a manual resume point from an earlier run; confirm before relying
        # on full coverage.
        if index > 10:
            jumpUrl(secondUrl, thirdList, True, True)
            for ind, thirdUrl in enumerate(third_list):
                # (the original additionally guarded on `ind >= 0`, which is
                # always true for enumerate indices — removed)
                print('thirdUrl==  ' + str(ind) + '=    ' + third_namelist[ind] +'=|||||||||||||==secondUrl=   ' + str(index) + '=  ' + second_namelist[index])
                getGoodsList(thirdUrl)
                print('============休息一下====9s=============')
                time.sleep(random.uniform(6.7, 9.3))
            print('============休息一下=======90s==========')
            try:
                time.sleep(random.uniform(60.7, 90.3))
            except Exception:
                # A bare `except:` here also swallowed KeyboardInterrupt;
                # Exception keeps Ctrl+C working while tolerating sleep errors.
                print('time err')
        # Reset third-level accumulators before the next second-level category.
        del third_list[:]
        del third_namelist[:]

    print('=====================================跑完了===============================================================')


# 页面的跳转，及商品asin的获取，及 ranking的获取
# Page fetch + dispatch: parse the page, hand it to a callback, optionally crawl goods.
def jumpUrl(_url, callback, Boo, isC):
    """Fetch _url through a random proxy/UA/referer and dispatch the result.

    Args:
        _url: page URL to fetch.
        callback: receives the parsed lxml tree when Boo is True.
        Boo: when True, invoke callback(parsed_tree) (category-list extraction).
        isC: when True, also walk the paginated goods listing for this URL.
    """
    print('大链接')
    print(_url)
    # Throttle so successive requests are not fired back-to-back.
    time.sleep(random.uniform(3.1, 6.5))
    url = _url
    proxies = {'http': random.choice(proxies_list)}
    headers = {
        # random.choice replaces the original hard-coded randint(0, 4500),
        # which raised IndexError whenever the UA list had < 4501 entries.
        'User-Agent': random.choice(user_agent_list),
        'Referer': random.choice(Referer_list),
    }
    print(proxies)
    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=(3, 7))
    except requests.RequestException:
        # One retry after a pause; a second failure propagates to the caller.
        time.sleep(random.uniform(9.1, 10.3))
        response = requests.get(url, headers=headers, proxies=proxies, timeout=(3, 7))
    response_soup = etree.HTML(response.text)
    if Boo:
        callback(response_soup)
    if isC:
        # Crawl the goods listing pages for this category URL.
        getGoodsList(url)


def secondList(response_soup):
    """Append second-level category URLs and names from a parsed page's left nav.

    Args:
        response_soup: lxml element tree of a category listing page.
    """
    hrefs = response_soup.xpath('//*[@id="leftNav"]/ul/ul/div/li/span/a/@href')
    names = response_soup.xpath('//*[@id="leftNav"]/ul/ul/div/li/span/a//text()')
    # zip guards against mismatched result lengths, which previously raised
    # IndexError when indexing the names list by the hrefs index.
    for ahref, name in zip(hrefs, names):
        second_list.append('https://www.amazon.com' + ahref)
        second_namelist.append(name)


def thirdList(response_soup):
    """Append third-level category URLs and names from a parsed page's left nav.

    Args:
        response_soup: lxml element tree of a category listing page.
    """
    hrefs = response_soup.xpath('//*[@id="leftNav"]/ul/ul/div/li/span/a/@href')
    names = response_soup.xpath('//*[@id="leftNav"]/ul/ul/div/li/span/a//text()')
    # zip guards against mismatched result lengths, which previously raised
    # IndexError when indexing the names list by the hrefs index.
    for ahref, name in zip(hrefs, names):
        third_list.append('https://www.amazon.com' + ahref)
        third_namelist.append(name)


def getGoodsList(_url, max_pages=75):
    """Walk the paginated goods listing for one category URL.

    Args:
        _url: category listing URL; '&page=N' is appended per request.
        max_pages: number of result pages to crawl (default 75, as before).
    """
    global proxies_list
    print('getGoodsDetail')
    # ~2/7 chance per category of refreshing the proxy pool from proxiesDEF().
    refresh_odds = [True, False, True, False, False, False, False]
    try:
        if random.choice(refresh_odds):
            proxies_list = proxiesDEF()
            print(proxies_list)
    except Exception:
        # Proxy refresh is best-effort; keep the previous pool on failure.
        print('获取ip失败')
    for page in range(1, max_pages + 1):
        url = _url + '&page=' + str(page)
        print('页数=====   ' + str(page))
        print(url)
        getGoodsDetail(url, page, proxies_list)
        time.sleep(random.uniform(0.7, 1.3))



def getGoodsDetail(url, index, proxies_list):
    """Fetch one listing page and route it to the matching page parser.

    Args:
        url: full listing-page URL (already includes '&page=N').
        index: 1-based page number; page 1 uses the legacy #mainResults layout.
        proxies_list: proxy pool; entry index-1 is preferred, else a random one.
    """
    time.sleep(random.uniform(0.1, 1.5))
    # Prefer a distinct proxy per page; fall back to a random one when the
    # pool is shorter than the page count (explicit check instead of the
    # original try/except IndexError).
    if index - 1 < len(proxies_list):
        proxies = {'http': proxies_list[index - 1]}
    else:
        proxies = {'http': random.choice(proxies_list)}
    headers = {
        # random.choice replaces the original hard-coded randint(0, 4500),
        # which raised IndexError whenever the UA list had < 4501 entries.
        'User-Agent': random.choice(user_agent_list),
        'Referer': random.choice(Referer_list),
    }
    print(proxies)
    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=(3, 7))
    except requests.RequestException:
        print('==getGoodsDetail http   error')
        return
    try:
        response_soup = etree.HTML(response.text)
        if index == 1:
            # First page renders the legacy <ul id="mainResults"> layout.
            result_list = response_soup.xpath('//*[@id="mainResults"]/ul/li')
            firstPage(result_list)
        else:
            result_list = response_soup.xpath('//*[@class ="s-result-list s-search-results sg-row"]/div')
            remainPage(result_list)
    except Exception:
        print('==getGoodsDetail    error')



def firstPage(result_list):
    """Extract goods rows from the legacy (page-1, #mainResults) layout.

    Args:
        result_list: list of <li> lxml elements, one per product.

    Each product is reduced to a goods dict and inserted via inserSql().
    """
    # Shared xpath prefixes for the right (text) and left (image) columns.
    right = ('.//div/div[@class="a-fixed-left-grid"]/div'
             '/div[@class="a-fixed-left-grid-col a-col-right"]')
    left = ('.//div/div[@class="a-fixed-left-grid"]/div'
            '/div[@class="a-fixed-left-grid-col a-col-left"]')

    def first_or(node, path, default=''):
        # First xpath hit, or `default` when the node lacks that element
        # (replaces the original's six copy-pasted bare try/excepts).
        hits = node.xpath(path)
        return hits[0] if hits else default

    for li in result_list:
        url = first_or(li, right + '/div[@class="a-row a-spacing-small"]/div[1]/a/@href')
        # "dp" is the ASIN-like code between 'dp/' and '/ref=' in the URL.
        dp = url.split('dp/')[1].split('/ref=')[0] if 'dp/' in url else ''
        name = first_or(li, right + '/div[@class="a-row a-spacing-small"]/div[1]/a/h2//text()')
        img = first_or(li, left + '/div/div/a/img/@src')
        raw_star = first_or(li, right + '/div[@class="a-row"]//div[2]/div/span/span/a/i[1]/span//text()')
        # aria text looks like "4.5 out of 5 stars"; keep the leading number.
        star = raw_star.split(' out')[0] if raw_star else '0'
        raw_reviews = first_or(li, right + '/div[@class="a-row"]//div[2]/div/a//text()')
        reviews = raw_reviews.replace(',', '') if raw_reviews else '0'
        goods = {
            'dp': dp,                # ASIN-like code from the URL
            'packagetype': '',
            'packagelink': '',
            'ISBN10': '',
            'ISBN13': '',
            'asin': '',
            'rankingtype': '',       # top-level category type (not scraped here)
            'ranking': '',           # top-level category rank (not scraped here)
            'star': star,
            'reviews': reviews,
            'moneytype': '',
            'price': 0,
            'url': url,
            'name': name,
            'img': img,
            'country': user['country'],
        }
        inserSql(goods)

def remainPage(result_list):
    """Extract goods rows from the modern grid layout used on pages >= 2.

    Args:
        result_list: list of <div> lxml elements, one per product.

    Each product is reduced to a goods dict and inserted via inserSql().
    """
    # Shared xpath prefixes (identical strings to the original's long paths).
    box = ('./div/div[@class="s-include-content-margin s-border-bottom"]'
           '/div[@class="a-section a-spacing-medium"]/div[2]')
    right = box + '/div[2]/div[@class="sg-col-inner"]/div[1]/div/div'

    def first_or(node, path, default=''):
        # First xpath hit, or `default` when the node lacks that element
        # (replaces the original's copy-pasted bare try/excepts).
        hits = node.xpath(path)
        return hits[0] if hits else default

    for li in result_list:
        href = first_or(li, right + '/div[@class="a-section a-spacing-none"]/h2/a/@href')
        url = 'https://www.amazon.com' + href if href else ''
        # "dp" is the ASIN-like code between 'dp/' and '/ref=' in the URL.
        dp = url.split('dp/')[1].split('/ref=')[0] if 'dp/' in url else ''
        name = first_or(li, right + '/div[@class="a-section a-spacing-none"]/h2/a/span/text()')
        img = first_or(li, box + '/div[1]/div/div/span/a/div/img/@src')
        raw_star = first_or(li, right + '/div[@class="a-section a-spacing-none a-spacing-top-micro"]/div/span[1]/@aria-label')
        # aria-label looks like "4.5 out of 5 stars"; keep the leading number.
        star = raw_star.split(' out')[0] if raw_star else '0'
        reviews = first_or(li, right + '/div[@class="a-section a-spacing-none a-spacing-top-micro"]/div/span[2]/a/span/text()', '0')

        goods = {
            'dp': dp,                # ASIN-like code from the URL
            'packagetype': '',
            'packagelink': '',
            'ISBN10': '',
            'ISBN13': '',
            'asin': '',
            'rankingtype': '',       # top-level category type (not scraped here)
            'ranking': '',           # top-level category rank (not scraped here)
            'star': star,
            'reviews': reviews,
            'moneytype': '',
            'price': 0,
            'url': url,
            'name': name,
            'img': img,
            'country': user['country'],
        }
        inserSql(goods)


def inserSql(goods):
    """Persist one goods record via the shared module-level MySQL connection.

    Args:
        goods: dict carrying the 16 columns expected by sqlStr (see `fields`).

    Errors are logged and swallowed so a single bad row cannot stop the crawl.
    """
    print(' ==dp=====  ' + goods['dp'])
    # Column order must match the placeholder order in sqlStr.
    fields = ('dp', 'packagetype', 'packagelink', 'ISBN10', 'ISBN13', 'asin',
              'rankingtype', 'ranking', 'star', 'reviews', 'moneytype',
              'price', 'url', 'name', 'img', 'country')
    sqlParam = [goods[f] for f in fields]
    try:
        mysql.insert(sqlStr, sqlParam)
    except Exception as e:
        # Print the actual error (the original bare `except:` hid it entirely
        # and also swallowed KeyboardInterrupt).
        print('sql error')
        print(e)

if __name__ == '__main__':
    # Script entry point: start crawling from the top-level category page.
    firstList()




