# -*- coding: utf-8 -*-
import time
import random
import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import re
import json
import lxml
import traceback
import codecs
import datetime
import threading
from retrying import retry
from multiprocessing.dummy import Pool as Threadpool
import logging
import uitl.db as db
import uitl.uitl as uitl

# Log to a file in append mode; each record carries timestamp, source location and level.
logging.basicConfig(format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.INFO,
                    filename='log/AmazonCrawler.log',
                    filemode='a')
requests.adapters.DEFAULT_RETRIES = 8  # raise the connection-level retry count
requests.packages.urllib3.disable_warnings()  # proxied requests use verify=False; silence the TLS warnings


class AmazonCrawler:
    """Amazon product-review crawler working through rotating SOCKS5 proxies.

    Entry point is :meth:`crawler`: it fetches the first review page of an
    ASIN, reads the advertised review total, then fans the remaining pages
    out to a thread pool via :meth:`get_content`.  Captcha pages are solved
    through an external OCR service (:meth:`get_code` / :meth:`up_code`).
    Parsed reviews are written to the database through ``uitl.db``; failures
    are tallied in ``err_dict``.

    Two methods are designed to run forever in background threads:
    :meth:`load_ip_list` (refreshes the global proxy list) and
    :meth:`load_cookie` (keeps a fresh amazon.com cookie in ``self.cookie``).
    """

    def __init__(self):
        # project ORM helper
        self.db_uitl = db.db_uitl()
        # candidate values for the `downlink` client-hint request header
        self.downlink_list = ['1.75', '10', '2.1', '2.25', '2.7', '2.2', '1.8', '2.15']
        self.uitl = uitl
        # total number of page requests issued
        self.get_count = 0
        # number of review pages stored successfully
        self.success_count = 0
        # scratch list of reviews (kept for interface compatibility; unused here)
        self.content_list = []
        # error message -> occurrence count
        self.err_dict = {}
        # Header templates below are shared by all worker threads; request
        # methods must copy them before adding per-request fields.
        # headers used when downloading captcha images
        self.img_headers = {
            "authority": "images-na.ssl-images-amazon.com",
            "method": "GET",
            "scheme": "https",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,zh-TW;q=0.8,en-US;q=0.7,en;q=0.6",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            'connection': 'close',
            "upgrade-insecure-requests": "1",
        }
        # headers used when submitting captcha answers
        self.code_header = {
            "authority": "www.amazon.com",
            "method": "GET",
            "scheme": "https",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,zh-TW;q=0.8,en-US;q=0.7,en;q=0.6",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            'connection': 'close',
            "upgrade-insecure-requests": "1",
        }
        # headers used when fetching product/review pages
        self.shop_headers = {
            "authority": "www.amazon.com",
            "method": "GET",
            "scheme": "https",
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                      "application/signed-exchange;v=b3;q=0.9",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,zh-TW;q=0.8,en-US;q=0.7,en;q=0.6",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            'connection': 'close',
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
        }
        # current amazon.com session cookie; refreshed by load_cookie()
        self.cookie = {}

    # fetch proxies
    @retry(stop_max_attempt_number=4, wait_random_max=2000)
    def load_ip_list(self):
        """Refresh the global ``ip_list`` from the proxy API every 60s.

        Runs forever; meant for a background thread.  If the API request
        raises, the exception propagates and @retry restarts the loop.
        """
        while True:
            global ip_list
            # proxy API response looks like {'list': [{'addr': '183.166.19.222:3265'}, ...]}
            r = requests.get(
                "http://121.37.19.221/ygvps/api/api/v1/proxy_list?group=3737&ttl=60&sort=ttl&prot=4"
            )
            ip_list = [json_dict.get('addr') for json_dict in json.loads(r.text).get('list')]
            print(ip_list)
            time.sleep(60)

    # solve a captcha image
    @retry(stop_max_attempt_number=5, wait_random_max=2000)
    def get_code(self, url, proxy_handler):
        """Download a captcha image and OCR it via the local solver service.

        :param url: absolute image URL on images-na.ssl-images-amazon.com
        :param proxy_handler: requests-style ``proxies`` dict
        :return: solved captcha text, or '' when the solver reports failure
        """
        header = dict(self.img_headers)  # copy: the template is shared across threads
        header['path'] = url.replace('https://images-na.ssl-images-amazon.com', '')
        header['User-Agent'] = self.uitl.chioce_user_agent()
        content = requests.get(url, verify=False, timeout=20, proxies=proxy_handler, headers=header)
        files = {'img': content.content}
        content.close()
        code = json.loads(requests.post('http://192.168.0.222:8008/amz/v1', files=files).text)
        if code.get('status_code') != 200:
            return ''
        return code.get('result')

    # submit the captcha answer; Amazon redirects to '/' on success
    # all five attempts must fail for the @retry to re-raise;
    # waits a random <2s between attempts
    @retry(stop_max_attempt_number=5, wait_random_max=2000)
    def up_code(self, url, code, amzn, session, proxy_handler):
        """Submit a solved captcha for *url* on the given session.

        :param code: OCR result from :meth:`get_code`
        :param amzn: hidden form token scraped from the captcha page
        :return: True when the captcha was accepted, else False
        """
        data = {
            "amzn": amzn,
            "amzn-r": url.replace('https://www.amazon.com/', ''),
            "field-keywords": code
        }
        up_code_url = 'https://www.amazon.com/errors/validateCaptcha?'
        header = dict(self.code_header)  # copy: the template is shared across threads
        header['downlink'] = random.choice(self.downlink_list)
        header['ect'] = '4g'
        header['rtt'] = str(random.randrange(50, 300, 50))
        header['User-Agent'] = self.uitl.chioce_user_agent()
        result = session.get(up_code_url, params=data, proxies=proxy_handler, verify=False,
                             headers=header, timeout=15)
        if result.url == 'https://www.amazon.com/':
            logging.info('破解验证码成功url = {}'.format(url))
            return True
        logging.info('破解验证码失败url = {}'.format(url))
        return False

    @retry(stop_max_attempt_number=5, wait_random_max=2000)
    def load_cookie(self):
        """Keep ``self.cookie`` populated; runs forever in a background thread.

        Whenever the cookie has been cleared (get_html clears it after a
        failed request) a fresh one is fetched through a random proxy.  A
        failed fetch propagates so @retry restarts the loop.
        """
        while True:
            if self.cookie == {}:
                print(66)
                ip = random.choice(ip_list)
                proxy_handler = {
                    "http": "socks5h://" + ip,
                    "https": "socks5h://" + ip,
                }
                r = requests.get('https://www.amazon.com/', headers=self.shop_headers,
                                 timeout=20, proxies=proxy_handler, verify=False)
                self.cookie = requests.utils.dict_from_cookiejar(r.cookies)
            time.sleep(10)

    # fetch a page's HTML
    @retry(stop_max_attempt_number=6, wait_random_max=2000)
    def get_html(self, url, headers):
        """Fetch *url* through a random proxy, solving a captcha if one appears.

        :param url: page to fetch
        :param headers: base header template (copied, never mutated)
        :return: page HTML, or '' on non-200 status / unsolved captcha
        :raises requests.RequestException: after recording the error, so
            that @retry re-runs the request with a new proxy
        """
        # every call counts as one request
        self.get_count += 1
        session = requests.Session()
        session.keep_alive = False  # drop idle connections
        ip = random.choice(ip_list)
        proxy_handler = {
            "http": "socks5h://" + ip,
            "https": "socks5h://" + ip,
        }
        # copy: the caller's template dict is shared by all worker threads
        headers = dict(headers)
        headers['path'] = url.replace('https://www.amazon.com/', '')
        headers['User-Agent'] = self.uitl.chioce_user_agent()
        headers['downlink'] = random.choice(self.downlink_list)
        headers['ect'] = '4g'
        headers['rtt'] = str(random.randrange(0, 300, 50))
        print(self.cookie)
        try:
            r = session.get(url, headers=headers, timeout=20, proxies=proxy_handler,
                            verify=False, cookies=self.cookie)
        except requests.RequestException as e:
            msg = str(e)
            logging.info(msg)
            # collapse proxy-failure noise into a single bucket
            if 'Max retries exceeded with url' in msg:
                msg = 'Max retries exceeded with url'
            self.err_dict[msg] = self.err_dict.get(msg, 0) + 1
            if 'Remote end closed connection without response' in msg:
                logging.info(headers)
            # clear the cookie so load_cookie() fetches a fresh one
            self.cookie = {}
            # BUG FIX: the original rebound `e` to str(e) and then did
            # `raise e`, which raised TypeError ("exceptions must derive
            # from BaseException") instead of the real request error.
            raise
        if r.status_code != 200:
            logging.info(url + '  status_code : ' + str(r.status_code))
            return ''
        Sorry = re.findall(
            "Sorry, we just need to make sure you're not a robot. For best results, please make sure your "
            "browser "
            "is accepting cookies.",
            r.text)
        if not Sorry:
            r.close()
            return r.text
        # Captcha page: scrape the hidden token and the image URL.
        amzn = re.findall('<input type=hidden name="amzn" value="(.*?)" />', r.text)[0]
        img_url = re.findall('img src="(.*?).jpg"', r.text)[0] + '.jpg'
        # solve and submit; on success re-fetch the original page
        code = self.get_code(img_url, proxy_handler)
        result = self.up_code(url, code, amzn, session, proxy_handler)
        if result:
            r = session.get(url, headers=headers, timeout=20, proxies=proxy_handler, verify=False)
            r.close()
            return r.text
        r.close()
        return ''

    @staticmethod
    def _parse_reviews(html, page, asin):
        """Parse one review page's HTML into a list of DB-ready dicts."""
        soup = BeautifulSoup(html, "lxml")
        review_nodes = soup.find_all('div', attrs={'class': 'a-section review aok-relative'})
        page_content = []
        for node in review_nodes:
            page_content.append({
                'review_id': node.get('id'),
                'profile_name': node.find('div', attrs={'class': 'a-profile-content'}).text,
                'review_date': node.find('span', attrs={
                    'class': 'a-size-base a-color-secondary review-date'}).text,
                'review_body': node.find('span', attrs={
                    'class': 'a-size-base review-text review-text-content'}).text,
                'page': page,
                'asin': asin,
            })
        return page_content

    # thread-pool worker: fetch and store one review page
    def get_content(self, url):
        """Fetch review page *url*, parse its reviews and write them to the DB."""
        page = int(re.findall(r'pageNumber=(\d+)', url)[0])
        # BUG FIX: the original stored the whole findall() LIST as the asin,
        # while crawler() stores a string; take the first match instead.
        asin_match = re.findall('product-reviews/(.*?)/', url)
        asin = asin_match[0] if asin_match else ''
        try:
            html = self.get_html(url, self.shop_headers)
        except Exception:
            # @retry exhausted all attempts — record and carry on
            self.err_dict['页面请求重试出错 :' + url] = 1
            html = ''
        if html == '':
            self.err_dict[url] = 1
            return
        page_content = self._parse_reviews(html, page, asin)
        if not page_content:
            return
        if self.db_uitl.into_db(page_content):
            self.success_count += 1
        else:
            err_msg = '评论重复插入失败'
            self.err_dict[err_msg + ' : ' + url] = 1
            self.uitl.get_err_html(asin, err_msg, html, page)

    # crawl entry point
    def crawler(self, asin):
        """Crawl all review pages of *asin* and verify the stored count.

        Fetches page 1, reads the advertised review total, then maps the
        remaining page URLs over a 50-thread pool of :meth:`get_content`.
        """
        print("#########正在爬取的#########{}".format(asin))
        all_content_count = []
        AllPage = 0
        # first review page
        content_url = "https://www.amazon.com/product-reviews/{}".format(asin)
        try:
            content_html = self.get_html(content_url, self.shop_headers)
        except Exception:
            # @retry exhausted all attempts
            self.err_dict['页面请求重试出错 :' + content_url] = 1
            content_html = ''

        if content_html != '':
            # total review count, e.g. "Showing 1-10 of 1,234 reviews"
            try:
                all_content_count = re.findall(r"Showing 1-\d+ of (.*?) reviews", content_html)
            except Exception:
                err_msg = '解析错误'
                logging.info(err_msg + ': ' + content_url)
                self.uitl.get_err_html(asin, err_msg, content_html, str(1))
            else:
                if not all_content_count:
                    logging.info('没有找到评论数:' + content_url)
                    self.uitl.get_err_html(asin, '没有找到评论数', content_html, str(1))
                else:
                    page_content = self._parse_reviews(content_html, 1, asin)
                    if self.db_uitl.into_db(page_content):
                        self.success_count += 1
                    else:
                        err_msg = '评论重复插入失败'
                        self.err_dict[err_msg + ' : ' + content_url] = 1
                        self.uitl.get_err_html(asin, err_msg, content_html, str(1))
        # only paginate when a review total was found
        if all_content_count:
            # total number of reviews (strip thousands separators)
            AllPage = int(all_content_count[0].replace(',', ''))
            print("{}   AllPageCount ： ".format(asin) + str(AllPage))
            # 10 reviews per page
            Page = int((AllPage - 1) / 10 + 1)
            print("{}   PageCount ： ".format(asin) + str(Page))
            if Page > 500:
                # NOTE(review): presumably Amazon caps review pagination — confirm
                Page = 500
            content_url_list = [
                "https://www.amazon.com/AmazonBasics-AMZ401-File-Folders-Assorted/product-reviews/"
                "{}/ref=cm_cr_arp_d_paging_btm_{}?ie=UTF8&pageNumber={}".format(
                    asin, str(page), str(page)) for page in range(2, Page + 1)]
            th = Threadpool(50)
            th.map(self.get_content, content_url_list)
        # verify the stored review count against the advertised total
        db_content_count = self.db_uitl.get_db_asin_count(asin)
        print(asin + ' 爬取到的评论数为 ： ' + str(db_content_count))
        if db_content_count == AllPage:
            logging.info('#########爬取完成############ {}'.format(asin))
        else:
            logging.info('{} 评论缺失 ：{}条'.format(asin, str(AllPage - db_content_count)))


if __name__ == '__main__':
    start_time = datetime.datetime.now()
    A = AmazonCrawler()
    # Background thread: refresh the proxy list periodically.
    t1 = threading.Thread(target=A.load_ip_list)
    t1.start()
    # Background thread: keep a fresh amazon.com cookie available.
    t2 = threading.Thread(target=A.load_cookie)
    t2.start()
    # give the background threads a head start before crawling
    time.sleep(3)

    # fetch the ASIN work list
    asin_list = A.uitl.get_asin()
    # de-duplicate
    asin_list = list(set(asin_list))
    # have_asinlist = ['B00KDNSNVM', 'B07G59RNPM'], 'B07B2QVZX6'
    # difference against already-crawled ASINs (disabled)
    # b = asin_list[0:100]
    # c = list(set(b).difference(set(have_asinlist)))
    # NOTE(review): asin_list is computed above but a single hard-coded
    # ASIN is crawled instead — confirm this is intentional (test run?).
    th = Threadpool(1)
    th.map(A.crawler, ['B07TTVZM9S'])

    # summary: error tally, success count, request count, elapsed time
    logging.info("错误字典 ：" + str(A.err_dict))
    logging.info("错误总数 ：" + str(sum(A.err_dict.values())))
    logging.info("成功总数 ：" + str(A.success_count))
    logging.info("总请求数： " + str(A.get_count))
    end_time = datetime.datetime.now()
    logging.info("结束所耗时   ： {}".format(str(end_time - start_time)))

    print("结束所耗时   ： {}".format(str(end_time - start_time)))
