# -*- coding: utf-8 -*-
"""
2019年6月16日20:13:21

品牌：arctic-cat
URL: https://www.partzilla.com/catalog/arctic-cat/atv/2019/alterra-570-eps-camo-a2019xeo1push
URL: https://www.partzilla.com/catalog/arctic-cat/side-by-side/2019/cushman-hauler-4x4-ca-gray-u2019c1v4ccal

@author: wk
"""
import json
import os
import random
import re
import logging
import socket
import time

from selenium import webdriver
import requests
from urllib import request
from bs4 import BeautifulSoup

# --- Logging setup ---------------------------------------------------------
# Log to <cwd>/data/log.txt.  Create the directory first: basicConfig raises
# FileNotFoundError if the target directory does not exist.
_log_dir = os.path.join(os.getcwd(), 'data')
os.makedirs(_log_dir, exist_ok=True)
logging.basicConfig(filename=os.path.join(_log_dir, 'log.txt'),
                    format='%(asctime)s  %(filename)s : %(levelname)s  %(message)s',  # log line format
                    datefmt='%Y-%m-%d %H:%M:%S',  # timestamp format
                    level=logging.DEBUG)

# Site domain; every catalog href below is appended to this.
domainUrl = 'https://www.partzilla.com'

# Pool of User-Agent strings; one is chosen at random per request to reduce
# the chance of the scraper being throttled/blocked.
user_agent_list = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
    'Mozilla/5.0 (Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763'
]

# Catalog entry paths for each supported brand (relative to domainUrl).
# NOTE: the Can-Am path previously contained a stray trailing double quote
# ('/catalog/can-am"') which produced a broken URL; fixed here.
hrefs = {'ArcticCat': '/catalog/arctic-cat',
         'Can-Am': '/catalog/can-am',
         'Honda': '/catalog/honda',
         'Kawasaki': '/catalog/kawasaki',
         'Polaris': '/catalog/polaris',
         'Sea-Doo': '/catalog/sea-doo',
         'Ski-Doo': '/catalog/ski-doo',
         'Suzuki': '/catalog/suzuki',
         'Yamaha': '/catalog/yamaha'}

# Running count of data rows written to the output file.
row_num = 0
# Legacy accumulator for the (now commented-out) string-buffer output path.
strs = ''
# Template record; get_arctic_cat fills the fields in as it descends the
# category -> year -> model -> component hierarchy.
initial_detail = {'category': '',
                  'year': '',
                  'model': '',
                  'component': '',

                  'description': '',
                  'old_price': '',
                  'new_price': '',
                  'pic_url': ''
                  }

# Set to 1 to stop the recursion early (checked at the top of get_arctic_cat).
is_go = 0


def get_arctic_cat(url, old_detail):
    """Recursively crawl a Partzilla catalog page and write part rows.

    Starting from ``url``, the function detects which selection page it is on
    (category / year / model / component) by looking for the corresponding
    "Select ..." heading.  On selection pages it records the chosen value in
    ``old_detail`` and recurses into every link of the selection table.  On a
    component (parts-diagram) page it extracts one row per part and appends a
    '||'-separated line to the module-level ``file_object``.

    Args:
        url: Absolute page URL to fetch.
        old_detail: Dict accumulating the category/year/model/component path
            chosen so far; mutated in place as the recursion descends.

    Side effects: writes to ``file_object``, increments global ``row_num``,
    and logs progress.  Returns None.
    """
    global is_go
    global user_agent_list
    global strs
    global row_num

    # Early-exit flag: without this check there is no way to unwind the
    # recursion once a stop is requested.
    if is_go == 1:
        return

    headers = {"User-Agent": random.choice(user_agent_list)}

    # Be polite to the server / avoid rate limiting.
    time.sleep(1)

    try:
        r = requests.get(url, headers=headers, timeout=20)
    except requests.exceptions.RequestException:
        # BUGFIX: previously execution fell through after this except and hit
        # `r.status_code` with `r` unbound, raising NameError.  Skip this URL.
        logging.debug('NET_STATUS is not good')
        print('NET_STATUS is not good')
        return

    if r.status_code != 200:
        logging.debug('连接错误代码：' + str(r.status_code))

    soup = BeautifulSoup(r.text, features="lxml")

    # Determine what kind of page this is: each selection page carries a
    # distinctive "Select ..." heading; a parts-diagram page carries the
    # zoomable component image instead.
    is_select_category = soup.find(text="Select Category")
    is_select_year = soup.find(text="Select Year")
    is_select_model = soup.find(text="Select Model")
    is_select_com = soup.find(text="Select Component")
    is_com_html = soup.find('img', class_='pinchzoom img-responsive')

    print(is_select_category)
    print(is_select_year)
    print(is_select_model)
    print(is_select_com)
    print(is_com_html)

    kind = ''
    if is_select_category is not None:
        kind = 'category'
    if is_select_year is not None:
        kind = 'year'
    if is_select_model is not None:
        kind = 'model'
    if is_select_com is not None:
        kind = 'component'
    if is_com_html is not None:
        kind = 'com_html'

    logging.debug(
        '-------------------------------------选择页面:Select ' + kind + '-----------------------------------------')

    if is_com_html is not None:
        # Leaf page: a parts table.  Each data row yields one output record.
        table = soup.find('table', class_='table table-hover table-striped table-responsive')
        if table is None:
            # Defensive: image present but no parts table — nothing to record.
            logging.debug('component page without parts table: ' + url)
            return

        tbody = table.find('tbody')
        # Some pages wrap rows in <tbody>, others don't.
        tb_trs = tbody.find_all('tr') if tbody is not None else table.find_all('tr')

        for index, tr in enumerate(tb_trs):
            tds = tr.find_all('td')

            logging.debug('组件循环：页面第' + str(index) + '个tr')
            print('组件循环：页面第' + str(index) + '个tr')

            # find_all always returns a list (never None); rows with <=1 cell
            # are header/spacer rows and carry no part data.
            if len(tds) > 1:
                logging.debug('组件循环：页面第' + str(index) + '个tr  is not None and len(tds) > 1')
                print('组件循环：页面第' + str(index) + '个tr  is not None and len(tds) > 1')

                old_detail['description'] = tds[1].get_text().replace('\n', '').strip()

                # Price cell has three shapes: "Unavailable", a single price,
                # or <strike>old</strike> + new discounted price.
                if tds[2].get_text().replace('\r\n', '').strip() == 'Unavailable':
                    old_detail['old_price'] = 'Unavailable'
                    old_detail['new_price'] = 'Unavailable'
                elif tds[2].find('strike') is None:
                    old_detail['old_price'] = tds[2].get_text().replace('\r\n', '').strip()
                    old_detail['new_price'] = tds[2].get_text().replace('\r\n', '').strip()
                else:
                    old_detail['old_price'] = tds[2].find('strike').get_text().replace('\r\n', '')
                    # get_text('||||') joins strike/new texts with a separator
                    # we can split on; index 1 is the discounted price.
                    old_detail['new_price'] = tds[2].get_text('||||').split('||||')[1].replace('\r\n', '').strip()

                old_detail['pic_url'] = is_com_html.get('data-src')

                txt_line = old_detail

                # Append one '||'-separated record to the output file.
                file_object.write(
                    txt_line['category'] + '||' + txt_line['year'] + '||' + txt_line['model'] + '||' + txt_line[
                        'component'] + '||' +
                    txt_line[
                        'description'] + '||' + str(txt_line['old_price']) + '||' + str(txt_line['new_price']) + '||' +
                    txt_line[
                        'pic_url'] + '\n')

                row_num = row_num + 1

                logging.debug('组件条数：' + str(row_num))

    else:
        # Selection page: record the chosen value and recurse into each link.
        table = soup.find('table', class_='table table-striped table-bordered')
        if table is None:
            # Defensive: unexpected page layout — log and skip instead of
            # crashing the whole crawl with AttributeError.
            logging.debug('selection table not found on page: ' + url)
            return

        for a1 in table.find_all('a'):
            next_url = a1.get('href')
            chosed_attribute = a1.get_text()
            old_detail[kind] = chosed_attribute

            get_arctic_cat(domainUrl + next_url, old_detail)


# ----------- main -----------
# Crawl one brand/year subtree (Honda ATV 2004) and dump rows to a data file.
myurl = domainUrl + hrefs['Honda'] + '/atv/2004'

# Explicit UTF-8 so non-ASCII part descriptions do not crash on platforms
# whose default codec (e.g. Windows cp936) cannot encode them.
with open(os.path.join('data', 'data_honda.dat'), 'w', encoding='utf-8') as file_object:
    file_object.write('category||year||model||component||description||old_price||new_price||pic_url \n')
    get_arctic_cat(myurl, initial_detail)

# component_details = strs.split('%%%%')
#
# with open('data\\data_arctic_cat.dat', 'w') as file_object:
#     file_object.write('category||year||model||component||description||old_price||new_price||pic_url \n')
#     for line in component_details:
#         # logging.debug(line)
#         if line and line != "":
#             line = eval(line)
#             file_object.write(
#                 line['category'] + '||' + line['year'] + '||' + line['model'] + '||' + line['component'] + '||' + line[
#                     'description'] + '||' + str(line['old_price']) + '||' + str(line['new_price']) + '||' + line[
#                     'pic_url'] + '\n')
