# -*- coding: utf-8 -*-
import requests  # HTTP client for fetching pages
import random  # random delays and random pool picks
import time
from lxml import etree
from config.conf import proxiesDEF, user, Referer_list
from config.useragent import user_agent_list
from db.mysql_conn import MysqlUtils, config
mysql = MysqlUtils(config)  # MySQL helper; currently unused (inserts in inserSql are commented out)
# from saveCSV import saveCSV
# df = saveCSV(0).createCVS()

from pandas import DataFrame
# Module-level accumulator for scraped ASIN-like codes; one row per item,
# written to 'japan6a.csv' at the end of getGoodsList.
df = DataFrame(['asin'], index=['0'], columns=['asin'])

# Running row counter for `df`; incremented in remainPage for every item seen.
cvsIndex = 0


def getGoodsList(_url):
    """Crawl 200 result pages of an Amazon search URL and dump ASINs to CSV.

    Appends '&page=N' (N = 1..200) to `_url`, delegates each page to
    getGoodsDetail, then writes the accumulated module-level `df` to
    'japan6a.csv'.
    """
    global proxies_list  # kept module-global for backward compatibility
    print('getGoodsDetail')
    # Static proxy pool; the dynamic proxiesDEF() fetch was disabled because
    # it failed too often.
    proxies_list = ['125.46.0.62:53281', '124.11.192.216:80', '124.11.210.123:80', '163.125.223.87:8118',
                    '163.125.223.10:8118', '60.2.44.182:52143', '222.223.182.66:8000', '175.42.68.234:9999',
                    '114.101.45.138:65309', '114.25.152.52:80', '113.108.242.36:47713']
    for page in range(1, 201):
        url = _url + '&page=' + str(page)
        print('页数=====   ' + str(page))
        print(url)
        getGoodsDetail(url, page, proxies_list)
        # Random pause between pages to look less like a bot.
        time.sleep(random.uniform(0.9, 1.5))

    df.to_csv('japan6a.csv', index=None)


def getGoodsDetail(url, index, proxies_list):
    """Fetch one search-results page and hand its product rows to remainPage.

    `index` is the 1-based page number; it doubles as a preferred index into
    `proxies_list`. Network and parse failures are logged and swallowed so
    the crawl over the remaining pages continues.
    """
    time.sleep(random.uniform(0.1, 1.5))
    try:
        # Prefer the proxy aligned with the page number; fall back to a
        # random pool member when the index runs past the pool.
        proxies = {'http': proxies_list[index - 1]}
    except IndexError:
        proxies = {'http': random.choice(proxies_list)}
    headers = {
        # random.choice stays in range whatever the list lengths are
        # (the old randint(0, 4500) assumed >= 4501 user agents).
        'User-Agent': random.choice(user_agent_list),
        'Referer': random.choice(Referer_list)
    }
    print(proxies)
    try:
        # NOTE(review): `proxies` is built and printed but NOT passed to
        # requests — the proxied call below is intentionally disabled.
        response = requests.get(url, headers=headers, timeout=(3, 7))
        # response = requests.get(url, headers=headers, proxies=proxies, timeout=(3, 7))
        try:
            response_soup = etree.HTML(response.text)
            # Container of per-product <div>s in the search results grid.
            result_list = response_soup.xpath('//*[@id="search"]/div[1]/div[2]/div/span[3]/div[1]/div')
            remainPage(result_list)
        except Exception:
            print('==getGoodsDetail    error')
    except requests.exceptions.RequestException:
        print('==getGoodsDetail http   error')


def _first_xpath(node, path, default=''):
    """Return the first xpath match under `node`, or `default` when there is none."""
    try:
        return node.xpath(path)[0]
    except IndexError:
        return default


def remainPage(result_list):
    """Extract item fields from each product <div> and record them.

    For every element in `result_list` builds a `goods` dict (url, dp code,
    name, image, star rating, review count plus placeholder fields) and
    passes it to inserSql, bumping the module-level `cvsIndex` row counter.
    Missing fields degrade to '' (or '0' for star/reviews).
    """
    global cvsIndex
    for li in result_list:
        # Product link (right-hand column layout).
        href = _first_xpath(li, './div/div/div/div[2]/div[2]/div/div[1]/div/div/div[1]/h2/a/@href', None)
        url = '' if href is None else 'https://www.amazon.co.jp' + href
        try:
            # ASIN-like code embedded in the URL between 'dp/' and '/ref='.
            dp = url.split('dp/')[1].split('/ref=')[0]
        except IndexError:
            dp = ''
        name = _first_xpath(li, './div/div/div/div[2]/div[2]/div/div[1]/div/div/div[1]/h2/a/span/text()')
        img = _first_xpath(li, './div/div/div/div[2]/div[1]/div/div/span/a/div/img/@src')
        raw_star = _first_xpath(li, './div/div/div/div[2]/div[2]/div/div[1]/div/div/div[2]/div/span/a/i[1]/span/text()', None)
        # Rating text looks like '4.2つ星のうち...'; keep the numeric prefix.
        star = '0' if raw_star is None else raw_star.split('つ星のうち')[0]
        reviews = _first_xpath(li, './div/div/div/div[2]/div[2]/div/div[1]/div/div/div[2]/div/a/span/text()', '0')

        goods = {
            'dp': dp,  # ASIN-like code taken from the URL
            'packagetype': '',
            'packagelink': '',
            'ISBN10': '',
            'ISBN13': '',
            'asin': '',
            'rankingtype': '',  # top-level category type
            'ranking': '',  # top-level category rank
            'star': star,
            'reviews': reviews,
            'moneytype': '',
            'price': 0,
            'url': url,
            'name': name,
            'img': img,
            'country': user['country'],
        }
        cvsIndex = cvsIndex + 1
        print(cvsIndex)
        inserSql(goods, cvsIndex)




def inserSql(goods, index):
    """Record the scraped item's `dp` code at row `index` of the module DataFrame.

    Only the CSV-bound `df` is updated here; the original MySQL INSERT into
    the `japan` table (via `mysql.insert`) is disabled — re-enable it with a
    parameterized statement over the `goods` fields if DB persistence is
    needed again.
    """
    df.loc[index] = [goods['dp']]

if __name__ == '__main__':
    # firstList()
    # Seed URL: an Amazon JP seller storefront search (me=<seller id>);
    # getGoodsList paginates from here and writes japan6a.csv when done.
    getGoodsList('https://www.amazon.co.jp/s?me=AX32WLMSAE2YW')

