import multiprocessing
import os
import queue
import random
import re
import threading
from time import sleep
import pandas as pd
import requests
from lxml import etree
import json
import time


# Amazon Part Finder AJAX endpoints: url1 lists all makes for a model year,
# url2 lists all models for a year + makeId.
url1 = "https://www.amazon.com/gp/part-finder-ajax/asGetMakes.html?vehicleType=automotive&year={}"
url2 = "https://www.amazon.com/gp/part-finder-ajax/asGetModels.html?vehicleType=automotive&year={}&makeId={}"
# url3 = "https://www.amazon.com/Denso-234-4621-Oxygen-Sensor/dp/B000C5YCUM/ref=au_as_r"
# url3 = "https://www.amazon.com/Walker-Products-250-24001-4-Wire-Oxygen/dp/B000C94H76/ref=sr_1_1"
# Results accumulate in datas_total.xlsx next to this script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
datas_file = os.path.join(BASE_DIR, "datas_total.xlsx")
# Single ExcelWriter shared by every worker thread (used in save_data/check/main).
writer = pd.ExcelWriter(datas_file)
# Retry failed HTTP requests up to 3 times on the shared session.
requests.adapters.DEFAULT_RETRIES = 3
s = requests.session()
# NOTE(review): requests.Session has no `keep_alive` attribute; this sets a plain
# attribute and does not disable connection reuse — confirm intent.
s.keep_alive = False


def proxy_ip():
    """Build a requests-style ``proxies`` mapping for the paid tunnel proxy.

    Returns a dict with identical "http" and "https" entries containing
    the credentialed proxy URL.
    """
    host = "http-proxy-t2.dobel.cn"
    port = "9180"
    # Account credentials for the proxy provider.
    user = "GUIXUQIG2BCNKM80"
    password = "9Bb66Yz7"
    endpoint = "http://{}:{}@{}:{}".format(user, password, host, port)
    return {"http": endpoint, "https": endpoint}


def get_ua():
    """Return a randomized Chrome desktop User-Agent string.

    Randomizes the Chrome major/build/patch version numbers and the
    platform token; the surrounding UA skeleton is fixed.
    """
    # Keep the RNG call order stable: major, build, patch, then platform.
    major = random.randint(55, 62)
    build = random.randint(0, 3200)
    patch = random.randint(0, 140)
    platforms = [
        '(Windows NT 6.1; WOW64)', '(Windows NT 10.0; WOW64)', '(X11; Linux x86_64)',
        '(Macintosh; Intel Mac OS X 10_12_6)'
    ]
    version = 'Chrome/{}.0.{}.{}'.format(major, build, patch)
    tokens = ['Mozilla/5.0', random.choice(platforms), 'AppleWebKit/537.36',
              '(KHTML, like Gecko)', version, 'Safari/537.36']
    return ' '.join(tokens)
# Module-wide request headers with a randomized Chrome UA, chosen once at import.
headers = {"user-agent": get_ua()}
# headers = {"user-agent": get_ua(), "host": "www.amazon.com"}
# headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}


def save_data(writer, datas):
    """Append *datas* (a non-empty list of row dicts) to the results workbook.

    Re-reads the whole workbook from ``datas_file`` (if present), appends the
    new rows, and rewrites everything through *writer*. Column order is taken
    from the keys of the first row dict.
    """
    new_rows = pd.DataFrame(datas)
    if os.path.exists(datas_file):
        try:
            existing = pd.read_excel(datas_file)
            # DataFrame.append was removed in pandas 2.0; pd.concat is the
            # supported equivalent.
            df = pd.concat([existing, new_rows], ignore_index=True)
        except Exception:
            # Unreadable/empty workbook: start over with just the new rows.
            df = new_rows
    else:
        df = new_rows
    # The old encoding="utf-8" kwarg was dropped: to_excel removed it in
    # pandas 1.5 and it had no effect before that.
    df.to_excel(excel_writer=writer, columns=list(datas[0].keys()), index=False)


# Throttles: at most 8 concurrent year-threads (sem, used in main) and at most
# 4 concurrent URL workers (sem1, used in run_thread).
sem = threading.Semaphore(8)
sem1 = multiprocessing.Semaphore(4)


def check(year, make, model, good_url):
    """Check whether the product at *good_url* fits the given year/make/model.

    *make* and *model* are dicts with "key" (Amazon numeric id) and "value"
    (display name), as returned by the part-finder AJAX endpoints. If the
    product page's fitment stripe does not report a mismatch, every engine
    variant is probed via asCheckFit and each verdict row is written to the
    shared workbook; otherwise a single isNot=0 row is written.
    """
    try:
        # Query-string template for the product page; Make/Model/Year are
        # overwritten below with "name|id" values.
        param = {
            "_encoding": "UTF8",
            "Make": "{} | {}",
            "Model": "{} | {}",
            "Year": "{} | {}",
            "ie": "UTF8",
            "n": "15684181",
            "newVehicle": "1",
            "s": "automotive",
            "vehicleId": "6",
            "vehicleType": "automotive"
        }
        # Drop any existing query string before extracting the ASIN.
        good_url = good_url.split("?")[0]
        # print(good_url)
        sku_id = re.search(r"dp/(.*)/", good_url).group(1)
        datasList = []
        datasDict = {}
        # Check whether this vehicle fits the product.
        param["Make"] = "{}|{}".format(make["value"], make["key"])
        param["Model"] = "{}|{}".format(model["value"], model["key"])
        param["Year"] = "{}|{}".format(year, year)
        sleep(1)
        resp3 = s.get(good_url, headers=headers, params=param, timeout=5).text
        # print(resp3)
        html = etree.HTML(resp3)
        # Fitment stripe text; raises IndexError (caught below) if missing.
        isNot = html.xpath("//div[@id='asStripeWrap']//span[@id='asTitle']/text()")[0]
        datasDict["Year"] = year
        datasDict["Make"] = make["value"]
        datasDict["Model"] = model["value"]
        if isNot != "This does not fit your:":
            # print(datasDict)
            datasDict["isNot"] = 1
            # TODO: extract the trim from the page HTML; hard-coded to 20
            # ("Base" is assumed below) for now — confirm.
            trim = 20
            # Request the engine list for this year/make/model/trim.
            engine_url = "https://www.amazon.com/gp/part-finder-ajax/asGetAttributes.html?year={}&makeId={}&modelId={}&trim={}&attrList=Engine".format(
                year, make["key"], model["key"], trim)
            resp4 = s.get(engine_url, headers=headers, timeout=5).text
            engines = json.loads(resp4)["result"]["Engine"]
            engines_id = [en for en in json.loads(resp4)["result"]["Engine"].keys()]
            # engines_name = [en for en in json.loads(resp4)["result"]["Engine"].values()]
            # Probe each engine variant for fitment.
            for engine_id in engines_id:
                datasList = []
                try:
                    check_url = "https://www.amazon.com/gp/part-finder-ajax/asCheckFit.html"
                    check_param = {
                        "year": year,
                        "makeName": make["value"],
                        "makeId": make["key"],
                        "modelName": model["value"],
                        "modelId": model["key"],
                        "vehicleType": "automotive",
                        "asin": sku_id,
                        "Trim": trim,
                        "Engine": engine_id
                    }
                    datasDict["Trim"] = "Base"
                    datasDict["Engine"] = engines[engine_id]
                    sleep(1)
                    resp5 = s.get(check_url, headers=headers, params=check_param, timeout=5).text
                    datasDict["Answer"] = json.loads(resp5)["answer"]
                    if datasDict["Answer"] == "yes":
                        datasDict["Note"] = json.loads(resp5)["note"]
                    # print(datasDict)
                    # NOTE(review): the same datasDict object is appended each
                    # iteration (only its fields are overwritten) — confirm.
                    datasList.append(datasDict)
                    save_data(writer, datasList)
                    writer.save()
                except Exception as e0:
                    print("E0:", e0)
        else:
            datasDict["isNot"] = 0
            datasList.append(datasDict)
            save_data(writer, datasList)
            writer.save()
        print(datasDict)
    except Exception as e1:
        print("E1:", e1)


def main(year, good_url):
    """Fetch every make/model sold in *year* and check *good_url* against each.

    Runs under ``sem`` so at most 8 year-threads talk to Amazon concurrently.
    Network/JSON failures are logged (E2 per make, E3 for the whole year)
    rather than propagated.
    """
    with sem:
        try:
            # All vehicle makes for this model year (proxied request).
            resp1 = s.get(url1.format(year), headers=headers, timeout=5, proxies=proxy_ip()).text
            makes = list(json.loads(resp1)["makes"])
            # makes = [{"value":"Ford", "key":"54"}]
            for make in makes:
                # All models offered by this make in this year.
                try:
                    resp2 = s.get(url2.format(year, make["key"]), headers=headers, timeout=5).text
                    models = list(json.loads(resp2)["models"])
                    for model in models:
                        check(year, make, model, good_url)
                except Exception as e2:
                    print("E2:", e2)
        except Exception as e3:
            print("E3:", e3)
        # BUG FIX: the original called writer.close() here, so the first
        # year-thread to finish closed the shared module-level ExcelWriter
        # and every later writer.save() in check() failed (the errors were
        # swallowed by its except blocks). Rows are persisted by
        # writer.save() in check(); do not close the shared writer here.


def get_urls():
    """Load the product URLs from column F of Sheet1 in the reference workbook."""
    import xlrd

    workbook = xlrd.open_workbook('氧传感Amazon参考链接(1).xlsx')
    sheet = workbook.sheet_by_name('Sheet1')
    return sheet.col_values(5)


def run_thread(good_url):
    """Check *good_url* against every model year from 2021 down to 1970.

    Spawns one thread per year (each runs main(), which is itself throttled
    by ``sem``) and blocks until all of them finish. Runs under ``sem1``.
    """
    with sem1:
        print(good_url)
        threads = []
        for year in range(2021, 1969, -1):
            threads.append(threading.Thread(target=main, args=(year, good_url)))
        for t in threads:
            t.start()
        # BUG FIX: the original pushed the threads through a queue.Queue and
        # called task_done() before start(), so q.join() returned immediately
        # and the worker threads were never waited on — run() then piled up
        # 52 new threads per URL. Join each thread explicitly instead.
        for t in threads:
            t.join()


def run():
    """Process every product URL from the reference workbook, one at a time."""
    for product_url in get_urls():
        run_thread(product_url)


if __name__ == '__main__':
    # Time the full crawl and report the elapsed seconds.
    started_at = time.time()
    run()
    elapsed = time.time() - started_at
    print("共耗时{}秒".format(elapsed))