# -*- coding: UTF-8 -*-
# Author: Damon(wuud1)
# CreateDate: 2018-08-03
# Message: Brembo (bremboparts.com) part-data scraper, two-level page structure. The cookies carry one encrypted value that testing showed has no actual effect (so this code omits it). Features: resumable crawling and rate control, centrally configured under main. Fully encapsulated.

import requests
import redis
import os
import sys
import hashlib
import pymysql
import json
from lxml import etree
import time

# 解决python2编码问题
reload(sys)
sys.setdefaultencoding('utf-8')

class BremboForPid(object):

    def __init__(self, **kwargs):
        self.db_config = kwargs.get("db_config")  # 数据库配置
        self.select_table = kwargs.get("select_table")  # 查询表名
        self.insert_table = kwargs.get("insert_table")  # 插入表名
        self.partBrand = kwargs.get("partBrand")  # 网站品牌
        self.redis_set = '{}_set'.format(kwargs.get('partBrand'))  # redis数据库集合名,用来存储查询过的pid
        self.conn1 = None  # 数据库链接和游标
        self.conn2 = None
        self.curs1 = None
        self.curs2 = None
        self.base_path = '/Users/duanmengdi/Desktop/workspace/{}_html/'.format(kwargs.get("partBrand"))  # 存储html文件的路径
        self.location = ""  # 重定向链接
        self.total_num = 0  # 全局计数变量
        self.is_sleep = kwargs.get("is_sleep", True)  # 是否控制速度的开关变量
        self.r = redis.Redis(host='localhost', port=6379, db=0)  # redis链接,用于断点续爬

        self.search_url = 'https://www.bremboparts.com/asiapacific/zh/catalogue/code?code={}'
        self.base_url = 'https://www.bremboparts.com'
        self.headers = {
            "Host": "www.bremboparts.com",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
            "Accept-Language": "zh-CN,zh;q=0.9",
            # "": "",
        }

        self.cookies = {
            "_ga": "GA1.2.990742129.1531384081",
            "ckl": "true",
            "_gid": "GA1.2.624011020.1533264426",
            "clt": "zh-CN",
            "_gali": "Code",
            # "": "",
        }

    def getmd5(self, str):
        '''获取md5加密电子指纹'''
        md5 = hashlib.md5()
        md5.update(str)
        return md5.hexdigest()

    def get_conn(self, dbconfig_dict):
        '''获取数据库链接'''
        conn = pymysql.connect(**dbconfig_dict)
        return conn

    def get_cursor(self, conn, type='stream'):
        '''获取数据库游标'''
        if type == 'stream':
            return conn.cursor(pymysql.cursors.SSCursor)  # 返回流式游标,查询大量数据时不占用内存(返回数据形式是元组)
        elif type == 'dict':
            return conn.cursor(pymysql.cursors.DictCursor)  # 返回字典形式游标,查询出的数据以字典形式返回
        else:
            raise Exception("cursor type error")

    def parse_url(self, url, method, post_data=None, headers=None, tofile=True, **kwargs):
        requests.packages.urllib3.disable_warnings()  # 因为把ssl安全验证关了,所以不让代码报不安全的https请求警告
        pid = kwargs.get("local_pid", "")
        charset = kwargs.get("charset", "utf8")  # 暂时没有用到
        allow_redirects = kwargs.get("allow_redirects", False)  # 是否允许重定向, 默认不重定向,获取原始响应
        if method == 'GET':
            data = "GET" + url + pid  # 拼接data
            md5 = self.getmd5(data)  # 获取此次请求电子指纹
            file_path = self.base_path + md5 + ".html"
            if os.path.exists(file_path):  # 判断文件是否存在,如果存在就从本地读取
                print "GET for local"
                with open(file_path, 'r') as f:
                    res = f.read()
                response = res
            else:  # 如果路径不存在, 从web端请求
                print "GET for web"
                try:
                    response = requests.get(url=url, verify=False, headers=headers, cookies=self.cookies, timeout=30,
                                            allow_redirects=allow_redirects)
                except Exception as e:
                    response = requests.get(url=url, verify=False, headers=headers, cookies=self.cookies, timeout=30,
                                            allow_redirects=allow_redirects)

                # 每次请求都获取cookies, 并更新self.cookies(如果获取到cookies的话)
                cookies = response.cookies.get_dict()
                for k in cookies:
                    self.cookies[k] = cookies[k]

                # 获取location,
                location = response.headers.get("Location", None)
                if location:
                    print "响应头中存在location"
                    self.location = location
                    # return response.content.decode('utf8')

                if response:
                    if tofile:
                        with open(file_path, 'w') as f:  # 保存此次请求的html
                            f.write(response.content.decode('utf8'))
                    response = response.content.decode("utf8")
                else:
                    response = None

        elif method == "POST":
            data = "POST" + url + json.dumps(post_data)
            md5 = self.getmd5(data)
            file_path = self.base_path + md5 + ".html"
            if os.path.exists(file_path):  # 判断文件是否存在,如果存在就从本地读取
                print "POST for local"
                with open(file_path, 'r') as f:
                    res = f.read()
                response = res
            else:
                print "POST for web"
                try:
                    response = requests.post(url=url, data=post_data, verify=False, headers=headers,
                                             cookies=self.cookies, timeout=30,
                                             allow_redirects=allow_redirects)
                except Exception as e:
                    response = requests.post(url=url, data=post_data, verify=False, headers=headers,
                                             cookies=self.cookies, timeout=30,
                                             allow_redirects=allow_redirects)

                # 获取cookies, 并更新self.cookies
                cookies = response.cookies.get_dict()
                for k in cookies:
                    self.cookies[k] = cookies[k]

                # 获取location,
                location = response.headers.get("Location", None)
                if location:
                    print "响应头中存在location"
                    self.location = location
                    # response, file_path = self.parse_url(url=location, method="GET", headers=headers, local_pid=pid)
                    # print "已重新请求location地址"
                    # return response, file_path
                    # return response.content.decode('utf8')

                if response:
                    if tofile:
                        with open(file_path, 'w') as f:  # 保存此次请求的html
                            f.write(response.content.decode('utf8'))
                    response = response.content.decode("utf8")

                else:
                    response = None

        return response, file_path

    def save2db(self, data_dict, curs):
        # 抽取数据
        pid = data_dict['pid'].decode('utf8')
        brand_name = data_dict['brand_name'].decode('utf8')
        part_brand = data_dict['part_brand'].decode('utf8')
        part_num = data_dict['part_num'].decode('utf8')
        part_name = data_dict['part_name'].decode('utf8')
        brand_name1 = data_dict['brand_name1'].decode('utf8')
        pid1 = data_dict['pid1'].decode('utf8')
        status = data_dict['status'].decode('utf8')

        # 拼接sql语句
        sql = '''INSERT INTO `{}` (`pid`, `brand_name`, `part_brand`, `part_num`, `part_name`, `brand_name1`, `pid1`, `status`) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')'''.format(self.insert_table) % (
        pid, brand_name, part_brand, part_num, part_name, brand_name1, pid1, status)
        # print sql
        try:
            curs.execute(sql)
        except Exception as e:
            print e
            print sql
            exit()
        self.total_num += 1  # 数据总量计数

    def r_flushall(self):
        '''删除redis集合,起到重置断点续爬的功能'''
        self.r.delete(self.redis_set)
        self.r.save()

    def run(self):

        # 获取一系列数据库链接,游标
        self.conn1 = self.get_conn(dbconfig_dict=self.db_config)
        self.conn2 = self.get_conn(dbconfig_dict=self.db_config)
        self.curs1 = self.get_cursor(conn=self.conn1)
        self.curs2 = self.get_cursor(conn=self.conn2)
        select_sql = '''select pid,brand from {}'''.format(self.select_table)

        self.curs1.execute(select_sql)

        for data in self.curs1:
            local_pid = data[0]
            # local_pid = 'LR007055'  # 测试用pid, 有结果
            # local_pid = '4H0615121M'  # 测试用pid, 无结果

            # 断点续爬
            if self.r.sismember(self.redis_set, local_pid):
                continue
            self.r.sadd(self.redis_set, local_pid)  # 把pid添加到redis中,实现去重与断点续爬

            brand_name = data[1]
            if self.is_sleep:
                time.sleep(1)  # 控制请求速度
            search_url= self.search_url.format(local_pid)  # 拼接搜索url
            # print search_url
            search_result_response, search_result_file_path = self.parse_url(url=search_url, method="GET", headers=self.headers, local_pid=local_pid)
            if search_result_response:
                search_result_html = etree.HTML(search_result_response)
            else:
                print "没有获取到搜索结果页响应,", local_pid, search_result_file_path
                raise Exception("没有获取到搜索结果页响应")

            detail_url_list = search_result_html.xpath(u'//span[text()="产品数据表"]/../../@href')
            if len(detail_url_list) <= 0:
                print "没有找到详情页列表, 跳过", local_pid, search_result_file_path
                continue

            for detail_url in detail_url_list:
                detail_url = self.base_url + detail_url
                print detail_url

                detail_response, detail_file_path = self.parse_url(url=detail_url, method="GET", headers=self.headers, local_pid=local_pid)
                if detail_response:
                    detail_html = etree.HTML(detail_response)
                else:
                    print "没有获取到详情页响应,", local_pid, detail_file_path
                    raise Exception("没有获取到详情页响应")

                part_num = detail_html.xpath('//h1/text()')
                part_name = detail_html.xpath('//div[@class="subtitle"]/text()')

                part_num = part_num[0].replace('.', '').replace(' ', '') if len(part_num)>0 else ""
                part_name = part_name[0].replace(' ', '').replace('\n', '').replace('\r', '') if len(part_name)>0 else ""
                # print part_num, part_name

                oe_div_list = detail_html.xpath('//div[@class="manufacturer-reference-box"]/div[contains(@class,"item-detail")]')
                if len(oe_div_list) <= 0 :
                    print "没有找到oe对照列表, 跳过", local_pid, detail_file_path
                    continue

                # oe_div_list = oe_div_list[2:]  # 去掉头两个非数据标签, 换了xpath语法,不用去除

                for oe_div in oe_div_list:
                    brand_name1 = oe_div.xpath('./span[contains(@class,"brand-name")]/text()')
                    brand_name1 = brand_name1[0] if len(brand_name1)>0 else ""
                    # print brand_name1
                    status = ''  #
                    pid1_list = oe_div.xpath('./span[@class="code"]/text()')

                    if len(pid1_list) <= 0:
                        print "没有对应的oe号列表, 跳过", local_pid, detail_file_path
                        continue
                    for pid1 in pid1_list:
                        # 组装最终字典
                        last_data = dict(
                            pid=local_pid,
                            brand_name=brand_name,
                            part_brand=self.partBrand,
                            part_num=part_num,
                            part_name=part_name,
                            brand_name1=brand_name1,
                            pid1=pid1,
                            status=status if status else ""
                        )

                        self.save2db(data_dict=last_data, curs=self.curs2)
                        print last_data


                        if self.total_num % 100 == 0:  # 每100条数据提交一次
                            self.conn2.commit()
                            self.r.save()  # 提交一次redis缓存
                            print "提交insert缓存, 当前数量: {}".format(self.total_num)

        self.conn2.commit()
        self.r.save()
        print "最终提交, 数量: {}".format(self.total_num)

        self.curs1.close()
        self.curs2.close()
        self.conn1.close()
        self.conn2.close()


if __name__ == '__main__':
    # MySQL settings shared by the reader and writer connections.
    db_config = {
        "host": "127.0.0.1",
        "port": 3306,
        "user": "root",
        "passwd": "123456",
        "db": "my_data",
        "charset": "utf8",
    }

    spider = BremboForPid(
        db_config=db_config,
        select_table='0731_pid_2',  # source table holding the pids to look up
        insert_table='brembo_all',  # destination table for scraped rows
        partBrand='brembo',         # brand tag stored with every row
        is_sleep=False,             # True => pause 1s between requests
    )
    # spider.r_flushall()  # reset breakpoint-resume state; re-comment right after use to avoid an accidental reset
    spider.run()  # start scraping