# -*- coding: utf-8 -*-
import time
import random
import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import re
import json
import lxml
import traceback
import codecs
import datetime
import threading
from retrying import retry
from multiprocessing.dummy import Pool as Threadpool
import logging
import pymysql

from sqlalchemy import create_engine, MetaData, Table, Column, distinct
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.pool import NullPool
from sqlalchemy.dialects.mysql import INTEGER, VARCHAR, TEXT
from sqlalchemy.orm import sessionmaker

logging.basicConfig(format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.INFO,
                    filename='AmazonCrawler.log',
                    filemode='a')
requests.adapters.DEFAULT_RETRIES = 8  # raise the connection retry count
requests.packages.urllib3.disable_warnings()  # silence InsecureRequestWarning from the verify=False requests below
Base = declarative_base()  # ORM declarative base shared by the model class below


class Amazon(Base):  # ORM model class
    """One scraped Amazon product review, mapped to the amazon_reviews table."""
    __tablename__ = "amazon_reviews"  # table name
    review_id = Column(VARCHAR(255), primary_key=True)  # primary key: Amazon's review element id
    profile_name = Column(VARCHAR(255))  # reviewer display name
    review_date = Column(VARCHAR(255))  # raw review-date text as scraped
    review_body = Column(TEXT)  # review text
    page = Column(INTEGER)  # review-list page number the row was scraped from
    asin = Column(VARCHAR(20))  # product ASIN

    def __init__(self, review_id, profile_name, review_date, review_body, page, asin):
        self.review_id = review_id
        self.profile_name = profile_name
        self.review_date = review_date
        self.review_body = review_body
        self.page = page
        self.asin = asin


class AmazonCrawler:
    """Scrapes Amazon product reviews through rotating socks5 proxies and
    stores them via the Amazon ORM model."""

    def __init__(self):
        """Set up counters, header templates and the User-Agent pools."""
        # ORM model class used for inserts/queries
        self.Amazon = Amazon
        # sqlalchemy sessionmaker; assigned by the caller after the engine is created
        self.db_session = None
        # candidate values for the "downlink" request header
        self.downlink_list = ['1.75', '10', '2.1', '2.25', '2.7', '2.2', '1.8', '2.15']

        # total number of requests issued
        self.get_count = 0
        # number of successfully handled pages
        self.success_count = 0
        # review buffer (not referenced elsewhere in this file)
        self.content_list = []
        # error tally: key = error message, value = occurrence count
        self.err_dict = {}
        self.ua_dict = {'Firefox': ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:77.0) Gecko/20190101 Firefox/77.0',
                                   'Mozilla/5.0 (X11; Linux ppc64le; rv:75.0) Gecko/20100101 Firefox/75.0',
                                   'Mozilla/5.0 (X11; Linux; rv:74.0) Gecko/20100101 Firefox/74.0',
                                   'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) Gecko/20100101 Firefox/73.0',
                                   'Mozilla/5.0 (X11; OpenBSD i386; rv:72.0) Gecko/20100101 Firefox/72.0',
                                   'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:71.0) Gecko/20100101 Firefox/71.0',
                                   'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:70.0) Gecko/20191022 Firefox/70.0',
                                   'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:69.2.1) Gecko/20100101 Firefox/69.2',
                                   'Mozilla/5.0 (Windows NT 6.1; rv:68.7) Gecko/20100101 Firefox/68.7',
                                   'Mozilla/5.0 (X11; Linux i686; rv:64.0) Gecko/20100101 Firefox/64.0',
                                   'Mozilla/5.0 (X11; Linux i586; rv:63.0) Gecko/20100101 Firefox/63.0',
                                   'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:62.0) Gecko/20100101 '
                                   'Firefox/62.0',
                                   'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.13; ko; rv:1.9.1b2) Gecko/20081201 '
                                   'Firefox/60.0',
                                   'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                                   'Firefox/58.0.1',
                                   'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/58.0',
                                   'Mozilla/5.0 (Windows NT 5.0; Windows NT 5.1; Windows NT 6.0; Windows NT 6.1; '
                                   'Linux; es-VE; '
                                   'rv:52.9.0) Gecko/20100101 Firefox/52.9.0',
                                   'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:52.59.12) Gecko/20160044 Firefox/52.59.12',
                                   'Mozilla/5.0 (X11; Ubuntu i686; rv:52.0) Gecko/20100101 Firefox/52.0',
                                   'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a1) Gecko/20060814 Firefox/51.0',
                                   'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:62.0) Gecko/20100101 '
                                   'Firefox/49.0',
                                   'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20120121 Firefox/46.0',
                                   'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.66.18) Gecko/20177177 Firefox/45.66.18',
                                   'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'],
                       'Chrome': [
                           'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/70.0.3538.77 Safari/537.36',
                           'Mozilla/5.0 (X11; Ubuntu; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/55.0.2919.83 Safari/537.36',
                           'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/54.0.2866.71 '
                           'Safari/537.36',
                           'Mozilla/5.0 (X11; Ubuntu; Linux i686 on x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/53.0.2820.59 '
                           'Safari/537.36',
                           'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/52.0.2762.73 '
                           'Safari/537.36',
                           'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/49.0.2656.18 '
                           'Safari/537.36',
                           'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML like Gecko) '
                           'Chrome/44.0.2403.155 Safari/537.36',
                           'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 '
                           'Safari/537.36',
                           'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/41.0.2227.1 '
                           'Safari/537.36',
                           'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 '
                           'Safari/537.36',
                           'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/41.0.2226.0 Safari/537.36',
                           'Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/41.0.2225.0 Safari/537.36',
                           'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 '
                           'Safari/537.36',
                           'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 '
                           'Safari/537.36']}
        # header template for fetching captcha images from the image CDN
        self.img_headers = {
            "authority": "images-na.ssl-images-amazon.com",
            "method": "GET",
            "scheme": "https",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,zh-TW;q=0.8,en-US;q=0.7,en;q=0.6",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            'connection': 'close',
            "upgrade-insecure-requests": "1",
        }
        # header template for submitting captcha answers
        self.code_header = {
            "authority": "www.amazon.com",
            "method": "GET",
            "scheme": "https",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,zh-TW;q=0.8,en-US;q=0.7,en;q=0.6",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            'connection': 'close',
            "upgrade-insecure-requests": "1",
        }
        # header template for product / review page requests
        self.shop_headers = {
            "authority": "www.amazon.com",
            "method": "GET",
            "scheme": "https",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,zh-TW;q=0.8,en-US;q=0.7,en;q=0.6",
            # logged-out session cookie (disabled)
            # "cookie": 'sp-cdn="L5Z9:CN"; ubid-main=134-8468621-4495316; s_nr=1591953958615-Repeat; s_vnum=2023949268528%26vn%3D2; s_dslv=1591953958616; sst-main=Sst1|PQHsCZI5d9FjlDyRCtBKpjCNC_z50retkASuOJkkAicYf7mZn8eqtzS7R9QtOZGriIdEk_xhAoTkYcfYd2LobOer5pge1QRSFYRw-Z-Zw19zezcfbKK6KZETX6IlftgEEAZTlZo3cOG6Lf9v3-9t6WcxqobIaSyqtUMSRJAcW8Kw2-AkADcR7UfH2WLllHBFiPIwqPVKnj1nCAjhmjwzB47cIVJ4jh2owqdhRgpvdHBBWPLf-X4PDoO8vCY8ymiZ7dGdTYoUBsX_Ne88kQklr7yfTRtejdhH2ZP-LBGOH873yQRSa4KbIaLY5Gi-hpBuSUaXBILGYlCj2ojwBJuDMAsWuw; i18n-prefs=USD; x-wl-uid=1QsTj97b5/0n2p7kOcGpmyV2DKqLijo1Fq86OTs3n0j7Qwp7KFrOlKmNhUQNQ5OJk/SBVzN1pG831JqtHRYc0LRBII3BNKcrj1KsQXMzX18n7IouALXCjmlM2QT8Dk3A5c5Fw5gPx6a8=; skin=noskin; x-amz-captcha-1=1592370854461372; x-amz-captcha-2=Jg9TMmqzenSGbYgrCYnt6w==; session-id=139-5890879-8867853; session-id-time=2082787201l; session-token=HBbQwo/fsXb533hIBUfMES4SBmS+BT32rg+zWTpFNUpKelLmeVNUWS7W9gAsfFwA539zPVgF1P22Vb+9rvgc1xTs3Q8oEPAwXtdwcVUI9VKttYKQoFjMfuqOqjJ/LmLcbTkEnODydlThlOxfIQSj+4Xgo60XeEWxXmYyhRDurI0GdNIfZ3EbD2CwpIMN8z9z; csm-hit=tb:s-AV6C10MNT8DC94R27KQD|1592447054369&t:1592447056618&adb:adblk_no',
            # logged-in session cookie (disabled)
            # "cookie": 'sp-cdn="L5Z9:CN"; ubid-main=134-8468621-4495316; s_nr=1591953958615-Repeat; s_vnum=2023949268528%26vn%3D2; s_dslv=1591953958616; x-wl-uid=1QsTj97b5/0n2p7kOcGpmyV2DKqLijo1Fq86OTs3n0j7Qwp7KFrOlKmNhUQNQ5OJk/SBVzN1pG831JqtHRYc0LRBII3BNKcrj1KsQXMzX18n7IouALXCjmlM2QT8Dk3A5c5Fw5gPx6a8=; skin=noskin; x-amz-captcha-1=1592370854461372; x-amz-captcha-2=Jg9TMmqzenSGbYgrCYnt6w==; session-id=139-5890879-8867853; session-token="3m4S8CgBQHR7Bydz6bs4YkT5EGKCPxIWLj5MEo472f96EZ1Nd1en++/X/FqPUl78SYV8UHXsoJUmmM8tIh/X3Ef1YZuxtd4GMHYjdmckr5Tvo5N0abqCt4n9tvLRz11jaPYFwGpxUuvwausvb92IT/avvb9WHBS+fMc9zi/8/yb2Y06wuja9QEApJblN/Xnv7GwzPvwx+WkWVYfDJGvkbOGXvyb9x1YL494AQQ50RUM="; x-main="IHtuRUkpgG?HOOjZhymH8hloItJ7eaZNXU8qQt2tkjEs66m4?MIjJavxlTWeDrl5"; at-main=Atza|IwEBIHgPCBWzRfh7xbPnZ6IHNmgJHmTAKTJG8826DNiVZHBoFT0VZ9xZxlMLYGO-KZ6XHGogunqokEK-P_tJdgDwVwrY5aTfvh11H6L6hk44z_lt3B30pDQQYfLDoiLy5qDVbq_wU2g47gw4R-k7DmHQgYsB5CKv2xyYXn0hfmv0dnF7CzaCD0Ejslw3gpVQ3SeT5Pt5GEZ4xTfCTPftperZ5k9j4EjAIg6qlLHigSah383c-XH0coWomYTWyVSg70hUNhwtf01CqQMtzUt3uruktvGJajPPu-GrOmuh0vQLVY81rRCHmjjOJ3HH5bEFjZ9AgkUVrcYaoHfRpogynjqKb2p24K446W3K2cjwKOYALh4jFRqD95FP8NvGCO0frYaVi9jqPUxtvh-WAVAZJb4uGDZc; sess-at-main="lumBgL0aPQzBf5/j46c0GmuetzUlfE+EFg2uHi0QBGs="; sst-main=Sst1|PQGiqd1Ws3ecMUly2W_uXI_UC5KFNQ8tX-oTceElUjDcCexSvRqyulXixP3Mh1LSDf_dY02Zmgw5oi-BbZGXeKkOroLUrBBfgnU5MlALR3ExzY52rSLd2WZi8iKtFTdbDJ-OYXX2gtqExNVawNGVnPeX_7t82vOlxUVlYS7TIXtgmoj_voOZ3AXWlpTdRHfGtiaP56FjD-7yuPJz0cE0OnHVYlZvso6SjsUney9n-TZhbwbZjmx4Pj1MTg6e9F61XDoY_fxe9kSB5xhOLaTSn0i3QZ8YVU_SepzTWH7YCdtYMIx35NUN1B1I69GpXIR3L_A38SgjShRSx0KwcHBrnBnPkw; lc-main=en_US; session-id-time=2082787201l; i18n-prefs=USD; csm-hit=tb:s-XJ4FYPJVEJ63DQA0M8MY|1592447206788&t:1592447209231&adb:adblk_no',
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            'connection': 'close',
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
        }

    # Weighted random pick ("roulette wheel") over seq.
    def rand_pick(self, seq, probabilities):
        """Return one element of seq chosen with the given probabilities.

        Draws a uniform x in [0, 1) and returns the first item whose
        cumulative probability exceeds x. Falls back to the last item
        when the probabilities sum to less than 1; returns None for an
        empty seq (the old code leaked the loop variable and raised
        NameError in that case).
        """
        x = random.uniform(0, 1)
        cumulative = 0.0
        chosen = None
        for item, item_pro in zip(seq, probabilities):
            chosen = item  # remember the candidate explicitly instead of leaking the loop variable
            cumulative += item_pro
            if x < cumulative:
                break
        return chosen

    # 指定概率获取user-agent
    def chioce_user_agent(self):
        # 指定概率
        probabilities = [0.3, 0.7]
        index = self.rand_pick(self.ua_dict.keys(), probabilities)
        return random.choice(self.ua_dict.get(index))

    # Proxy refresher: loops forever, reloading the shared ip_list every 60s.
    @retry(stop_max_attempt_number=4, wait_random_max=2000)
    def load_ip_list(self):
        """Refresh the global socks5 proxy list from the proxy API once a minute.

        Runs in a background thread; @retry restarts the loop (up to 4
        times) when the request raises.
        """
        while True:
            global ip_list
            # Fetch the proxy address list, e.g. ['183.166.19.222:3265', ...].
            r = requests.get(
                "http://121.37.19.221/ygvps/api/api/v1/proxy_list?group=3737&ttl=60&sort=ttl&prot=4",
                timeout=15  # without a timeout a stalled API call would hang this thread forever
            )
            ip_list = [json_dict.get('addr') for json_dict in json.loads(r.text).get('list')]
            r.close()  # release the connection promptly
            print(ip_list)
            time.sleep(60)

    # Fetch a captcha image and OCR it via the local captcha service.
    @retry(stop_max_attempt_number=5, wait_random_max=2000)
    def get_code(self, url, proxy_handler):
        """Download the captcha image at url and return the decoded text ('' on failure)."""
        # Work on a copy: writing 'path'/'User-Agent' into self.img_headers
        # would mutate shared state across the thread-pool workers.
        header = dict(self.img_headers)
        header['path'] = url.replace('https://images-na.ssl-images-amazon.com', '')
        header['User-Agent'] = self.chioce_user_agent()
        content = requests.get(url, verify=False, timeout=20, proxies=proxy_handler, headers=header)
        files = {'img': content.content}
        content.close()
        # Local captcha-recognition endpoint.
        code = json.loads(requests.post('http://192.168.0.222:8008/amz/v1', files=files).text)
        if code.get('status_code') != 200:
            return ''
        else:
            return code.get('result')

    # Submit the captcha answer; Amazon redirects to the home page on success.
    # Only fails after all 5 attempts raise; waits a random <2s between tries.
    @retry(stop_max_attempt_number=5, wait_random_max=2000)
    def up_code(self, url, code, amzn, session, proxy_handler):
        """Submit the captcha text for url; return True when Amazon accepts it."""
        data = {
            "amzn": amzn,
            "amzn-r": url.replace('https://www.amazon.com/', ''),
            "field-keywords": code
        }
        up_code_url = 'https://www.amazon.com/errors/validateCaptcha?'
        # Work on a copy so concurrent workers don't clobber each other's
        # per-request values in the shared self.code_header template.
        header = dict(self.code_header)
        header['downlink'] = random.choice(self.downlink_list)
        header['ect'] = '4g'
        header['rtt'] = str(random.randrange(50, 300, 50))
        header['User-Agent'] = self.chioce_user_agent()
        result = session.get(up_code_url, params=data, proxies=proxy_handler, verify=False, headers=header, timeout=15)
        # A redirect to the bare home page means the captcha was accepted.
        if result.url == 'https://www.amazon.com/':
            logging.info('破解验证码成功url = {}'.format(url))
            return True
        else:
            logging.info('破解验证码失败url = {}'.format(url))
            return False

    # 将错误的页面写进html文件夹下的html文件
    def get_err_html(self, asin, err_msg, html, page):
        f = codecs.open('html/{}-{}-{}.html'.format(str(asin), err_msg, str(page)), 'w', "utf-8")
        f.write(html)
        f.close()

    # Fetch a page, solving Amazon's robot-check captcha when it appears.
    @retry(stop_max_attempt_number=6, wait_random_max=2000)
    def get_html(self, url, headers):
        """GET url through a random socks5 proxy; return the HTML, or '' on failure.

        Every attempt bumps self.get_count; request errors are tallied in
        self.err_dict. When Amazon serves the robot-check page, the captcha
        image is OCR'd and submitted, then the original url is re-fetched.
        Raises requests.RequestException so @retry can retry.
        """
        self.get_count += 1  # every invocation counts as one request
        session = requests.Session()
        session.keep_alive = False  # drop idle connections
        # ip_list is the module-global refreshed by load_ip_list in a background thread.
        ip = random.choice(ip_list)
        proxy_handler = {
            "http": "socks5h://" + ip,
            "https": "socks5h://" + ip,
        }
        # Work on a copy: mutating the caller's dict (self.shop_headers) would
        # leak per-request values across the thread-pool workers.
        headers = dict(headers)
        try:
            headers['path'] = url.replace('https://www.amazon.com/', '')
            headers['User-Agent'] = self.chioce_user_agent()
            headers['downlink'] = random.choice(self.downlink_list)
            headers['ect'] = '4g'
            headers['rtt'] = str(random.randrange(0, 300, 50))
            r = session.get(url, headers=headers, timeout=20, proxies=proxy_handler, verify=False)
        except requests.RequestException as exc:
            # Tally the error, collapsing the noisy retry message into one bucket.
            msg = str(exc)
            if 'Max retries exceeded with url' in msg:
                msg = 'Max retries exceeded with url'
            self.err_dict[msg] = self.err_dict.get(msg, 0) + 1
            if 'Remote end closed connection without response' in msg:
                logging.info(headers)
            # Re-raise the original exception object. (The old code raised
            # str(e), which itself raises TypeError and loses the real error.)
            raise
        else:
            if r.status_code != 200:
                logging.info(url + '  status_code : ' + str(r.status_code))
                return ''
            Sorry = re.findall(
                "Sorry, we just need to make sure you're not a robot. For best results, please make sure your "
                "browser "
                "is accepting cookies.",
                r.text)
            if not Sorry:
                r.close()
                return r.text
            # Robot-check page: extract the captcha form token and image url.
            amzn = re.findall('<input type=hidden name="amzn" value="(.*?)" />', r.text)[0]
            img_url = re.findall('img src="(.*?).jpg"', r.text)[0] + '.jpg'
            # OCR the captcha and submit the answer.
            code = self.get_code(img_url, proxy_handler)
            result = self.up_code(url, code, amzn, session, proxy_handler)
            if result:
                r = session.get(url, headers=headers, timeout=20, proxies=proxy_handler, verify=False)
                r.close()
                return r.text
            else:
                r.close()
                return ''

    # 插入数据库
    def into_db(self, data):
        try:
            session = self.db_session()
            session.execute(self.Amazon.__table__.insert(), data)
            session.commit()
            session.close()
            return True
        except:
            err = traceback.format_exc() + ' | ' + data
            logging.info(err)
            return False

    def get_db_asin_count(self, asin):
        """Return how many amazon_reviews rows are already stored for asin."""
        session = self.db_session()
        try:
            # COUNT in SQL instead of materializing every row just to len() it.
            return session.query(self.Amazon).filter(self.Amazon.asin == asin).count()
        finally:
            session.close()

    # Scrape one paginated review page and store its reviews.
    def get_content(self, url):
        """Fetch one product-reviews page, parse every review, insert into the DB.

        A successful insert bumps self.success_count; failures are recorded in
        self.err_dict and the raw page is dumped via get_err_html.
        """
        page = int(re.findall(r'pageNumber=(\d+)', url)[0])
        # BUG FIX: re.findall returns a list; take the matched string so the
        # VARCHAR(20) asin column receives a string (as crawler() does), not a list.
        asin_match = re.findall('product-reviews/(.*?)/', url)
        asin = asin_match[0] if asin_match else ''
        try:
            html = self.get_html(url, self.shop_headers)
        except Exception:
            self.err_dict['页面请求重试出错 :' + url] = 1
            html = ''
        if html == '':
            self.err_dict[url] = 1
            return
        soup = BeautifulSoup(html, "lxml")
        reviews = soup.find_all('div', attrs={'class': 'a-section review aok-relative'})
        page_content = []
        for review in reviews:
            page_content.append({
                'review_id': review.get('id'),
                'profile_name': review.find('div', attrs={'class': 'a-profile-content'}).text,
                'review_date': review.find('span', attrs={
                    'class': 'a-size-base a-color-secondary review-date'}).text,
                'review_body': review.find('span', attrs={
                    'class': 'a-size-base review-text review-text-content'}).text,
                'page': page,
                'asin': asin,
            })
        if self.into_db(page_content):
            self.success_count += 1
        else:
            err_msg = '评论重复插入失败'
            self.get_err_html(asin, err_msg, html, page)
            self.err_dict[url] = 1

    # 获取asin.txt里数据
    def get_asin(self):
        asin_list = []
        with open('asin.txt', 'r') as f1:
            for asin in f1.readlines():
                if asin != None:
                    asin_list.append(asin.strip("\n"))
        f1.close()
        return asin_list



    # Crawler entry point for a single ASIN.
    def crawler(self, asin):
        """Crawl every review page of one product and verify the stored count.

        Page 1 is fetched inline (it also carries the total review count);
        pages 2..min(N, 500) are fanned out to get_content over a thread pool.
        """
        print("#########正在爬取的#########{}".format(asin))
        all_content_count = []
        AllPage = 0
        # First review page; also the source of the total review count.
        content_url = "https://www.amazon.com/product-reviews/{}".format(asin)
        try:
            content_html = self.get_html(content_url, self.shop_headers)
        except Exception:
            self.err_dict['页面请求重试出错 :' + content_url] = 1
            content_html = ''

        if content_html != '':
            # Extract the total review count ("Showing 1-10 of 1,234 reviews").
            try:
                all_content_count = re.findall(r"Showing 1-\d+ of (.*?) reviews", content_html)
            except Exception:
                err_msg = '解析错误'
                logging.info(err_msg + ': ' + content_url)
                self.get_err_html(asin, err_msg, content_html, str(1))
            else:
                if not all_content_count:
                    logging.info('没有找到评论数:' + content_url)
                else:
                    # Parse and store the page-1 reviews.
                    soup = BeautifulSoup(content_html, "lxml")
                    all_content = soup.find_all('div', attrs={'class': 'a-section review aok-relative'})
                    page_content = []
                    for review in all_content:
                        page_content.append({
                            'review_id': review.get('id'),
                            'profile_name': review.find('div', attrs={'class': 'a-profile-content'}).text,
                            'review_date': review.find('span', attrs={
                                'class': 'a-size-base a-color-secondary review-date'}).text,
                            'review_body': review.find('span', attrs={
                                'class': 'a-size-base review-text review-text-content'}).text,
                            'page': 1, 'asin': asin})
                    result = self.into_db(page_content)
                    if result:
                        self.success_count += 1
                    else:
                        err_msg = '评论重复插入失败'
                        self.get_err_html(asin, err_msg, content_html, str(1))
                        self.err_dict[content_url] = 1
        # Fan out the remaining pages when the product has reviews.
        if all_content_count:
            AllPage = int(all_content_count[0].replace(',', ''))
            print("{}   AllPageCount ： ".format(asin) + str(AllPage))
            # 10 reviews per page; Amazon caps review pagination at 500 pages.
            Page = int((AllPage - 1) / 10 + 1)
            print("{}   PageCount ： ".format(asin) + str(Page))
            if Page > 500:
                Page = 500
            content_url_list = [
                "https://www.amazon.com/AmazonBasics-AMZ401-File-Folders-Assorted/product-reviews/{" \
                "}/ref=cm_cr_arp_d_paging_btm_{}?ie=UTF8&pageNumber={}".format(
                    asin, str(page), str(page)) for page in range(2, Page + 1)]

            th = Threadpool(50)
            th.map(self.get_content, content_url_list)
            # Release the workers: the old code leaked a 50-thread pool per ASIN.
            th.close()
            th.join()
        # Verify the number of stored reviews against the advertised total.
        db_content_count = self.get_db_asin_count(asin)
        print(asin + ' 爬取到的评论数为 ： ' + str(db_content_count))
        if db_content_count == AllPage:
            logging.info('#########爬取完成############ {}'.format(asin))
        else:
            logging.info('{} 评论缺失 ：{}条'.format(asin, str(AllPage - db_content_count)))


if __name__ == '__main__':
    start_time = datetime.datetime.now()
    A = AmazonCrawler()
    # DB engine via pymysql. BUG FIX: query-string parameters must be joined
    # with '&' — the old '?charset=...?connect_timeout=10' sent a mangled
    # charset value of 'utf8mb4?connect_timeout=10'.
    engine = create_engine("mysql+pymysql://{}:{}@{}/{}?charset={}&connect_timeout=10"
                           .format('root',
                                   '',
                                   '127.0.0.1',
                                   'lj',
                                   'utf8mb4')
                           , poolclass=NullPool)
    # Session factory bound to the engine; handed to the crawler instance.
    DBSession = sessionmaker(bind=engine)
    A.db_session = DBSession
    # Background proxy refresher. daemon=True is required: load_ip_list loops
    # forever, and a non-daemon thread kept the old process alive after the
    # crawl finished.
    t1 = threading.Thread(target=A.load_ip_list, daemon=True)
    t1.start()
    time.sleep(3)  # give the first proxy fetch time to populate ip_list
    # Deduplicated ASIN work list from asin.txt.
    asin_list = list(set(A.get_asin()))

    th = Threadpool(30)
    th.map(A.crawler, ['B07GWJGS7X', 'B07B2QVZX6'])
    th.close()
    th.join()

    logging.info("错误字典 ：" + str(A.err_dict))
    logging.info("错误总数 ：" + str(sum(A.err_dict.values())))
    logging.info("成功总数 ：" + str(A.success_count))
    logging.info("总请求数： " + str(A.get_count))
    end_time = datetime.datetime.now()
    logging.info("结束所耗时   ： {}".format(str(end_time - start_time)))

    print("结束所耗时   ： {}".format(str(end_time - start_time)))

