# -*- coding: UTF-8 -*-
# Author: Damon(wuud1)
# CreateDate: 2018-08-07
# Message: 瓦尔塔蓄电池网站数据抓取, 根据网站的车型检索.爬虫的断点续爬功能做了更新, 原有的redis链接,sadd之后,如果还没有save程序就报错
# 的话, 下次启动程序再save的时候会把上次未save的数据也save到redis中,但是可能会造成断点续爬功能遗漏数据.本代码使用pipeline链接,不会有
# 这种情况.如果程序报错,没有save的数据不会存在于缓存中.
import requests
import redis
import os
import sys
import hashlib
import pymysql
import json
from lxml import etree
import time

# Python 2 only: re-expose setdefaultencoding (hidden by site.py at startup)
# so implicit str<->unicode conversions use utf-8 instead of ascii.
reload(sys)
sys.setdefaultencoding('utf-8')


class VartaForCars(object):
    '''varta蓄电池官网,根据车型查询出所有蓄电池数据'''

    def __init__(self, **kwargs):
        self.db_config = kwargs.get("db_config")  # 数据库配置
        self.select_table = kwargs.get("select_table")  # 查询表名
        self.insert_table = kwargs.get("insert_table")  # 插入表名
        self.partBrand = kwargs.get("partBrand")  # 网站品牌
        self.redis_set = '{}_set'.format(kwargs.get('partBrand'))  # redis数据库集合名,用来存储查询过的pid
        self.conn1 = None  # 数据库链接和游标
        self.conn2 = None
        self.curs1 = None
        self.curs2 = None
        self.base_path = '/Users/duanmengdi/Desktop/workspace/{}_html/'.format(kwargs.get("partBrand"))  # 存储html文件的路径
        self.location = ""  # 重定向链接
        self.total_num = 0  # 全局计数变量
        self.sub_num = 0  # 全局备用计数变量
        self.is_sleep = kwargs.get("is_sleep", True)  # 是否控制速度的开关变量
        self.r = redis.Redis(host='localhost', port=6379, db=0).pipeline()  # redis链接,用于断点续爬


        self.make_list = kwargs.get("make_list")

        # 爬虫用到的一些参数
        self.start_url = 'https://www.varta-automotive.cn/zh-cn'  # 首页
        self.get_model_url = 'https://www.varta-automotive.cn/products/ajaxLookup/?make={}&&get=model&lang=zh_CN&ph=%E9%80%89%E6%8B%A9%E6%B1%BD%E8%BD%A6%E5%93%81%E7%89%8C'
        self.get_engine_url= 'https://www.varta-automotive.cn/products/ajaxLookup/?make={}&model={}&&get=engine&lang=zh_CN&ph=%E9%80%89%E6%8B%A9%E6%B1%BD%E8%BD%A6%E5%9E%8B%E5%8F%B7'

        self.search_url = 'https://www.varta-automotive.cn/zh-cn/battery-finder?make={}&model={}&engine={}&find=find'

        self.headers = {
            "Host": "www.varta-automotive.cn",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
            "Referer": "https://www.varta-automotive.cn/zh-cn",
            # "": "",
        }

        self.cookies = {
            "CONCRETE5": "p61muhqci0q6g0brv85d6haso5",
            "jcui": "affc2420e09354605f165b09a8488dbb",
            "Hm_lvt_a050aeab4cdfa59279c2b1f88eb31ec4": "1533535397",
            "Hm_lpvt_a050aeab4cdfa59279c2b1f88eb31ec4": "1533535397",
            "_ga": "GA1.2.1015390942.1533535397",
            "_gat_UA-33497049-1": "1",
            "_gat_UA-6709784-1": "1",
            "GA1.2.1905486527.1533535398": "",
            "_gid": "GA1.2.854098710.1533535398",
            "_gat_UA-6709784-5": "1",
            "_gat_UA-6709784-7": "1",
        }

    def getmd5(self, str):
        '''获取md5加密电子指纹'''
        md5 = hashlib.md5()
        md5.update(str)
        return md5.hexdigest()

    def get_conn(self, dbconfig_dict):
        '''获取数据库链接'''
        conn = pymysql.connect(**dbconfig_dict)
        return conn

    def get_cursor(self, conn, type='stream'):
        '''获取数据库游标'''
        if type == 'stream':
            return conn.cursor(pymysql.cursors.SSCursor)  # 返回流式游标,查询大量数据时不占用内存(返回数据形式是元组)
        elif type == 'dict':
            return conn.cursor(pymysql.cursors.DictCursor)  # 返回字典形式游标,查询出的数据以字典形式返回
        else:
            raise Exception("cursor type error")

    def construct_insert_sql(self, table, data, db=None):
        '''传入表名, 数据字典, 返回insert语句'''
        fields = '`' + '`,`'.join(data.keys()) + '`'
        values = []
        for v in data.values():
            if v == 0:
                v = '0'
            elif not v:
                v = ''
            if type(v) == int or type(v) == long:
                values.append(v.__str__())
            elif v == "now()":
                values.append(v)
            else:
                values.append("'%s'" % v.replace("'", " ").replace("\\", "\\\\"))
        if db:
            sql = 'INSERT INTO `%s`.`%s` (%s) VALUES (%s)' % (db, table, fields, ",".join(values))
        else:
            sql = 'INSERT INTO `%s` (%s) VALUES (%s)' % (table, fields, ",".join(values))
        return sql

    def parse_url(self, url, method, post_data=None, headers=None, tofile=True, **kwargs):
        requests.packages.urllib3.disable_warnings()  # 因为把ssl安全验证关了,所以不让代码报不安全的https请求警告
        pid = kwargs.get("local_pid", "")
        charset = kwargs.get("charset", "utf8")  # 暂时没有用到
        allow_redirects = kwargs.get("allow_redirects", False)  # 是否允许重定向, 默认不重定向,获取原始响应
        if method == 'GET':
            data = "GET" + url + pid  # 拼接data
            md5 = self.getmd5(data)  # 获取此次请求电子指纹
            file_path = self.base_path + md5 + ".html"
            if os.path.exists(file_path):  # 判断文件是否存在,如果存在就从本地读取
                print "GET for local"
                with open(file_path, 'r') as f:
                    res = f.read()
                response = res
            else:  # 如果路径不存在, 从web端请求
                print "GET for web"
                try:
                    response = requests.get(url=url, verify=False, headers=headers, cookies=self.cookies, timeout=30,
                                            allow_redirects=allow_redirects)
                except Exception as e:
                    response = requests.get(url=url, verify=False, headers=headers, cookies=self.cookies, timeout=30,
                                            allow_redirects=allow_redirects)

                # 每次请求都获取cookies, 并更新self.cookies(如果获取到cookies的话)
                cookies = response.cookies.get_dict()
                for k in cookies:
                    self.cookies[k] = cookies[k]

                # 获取location,
                location = response.headers.get("Location", None)
                if location:
                    print "响应头中存在location"
                    self.location = location
                    # return response.content.decode('utf8')

                if response:
                    if tofile:
                        with open(file_path, 'w') as f:  # 保存此次请求的html
                            f.write(response.content.decode('utf8'))
                    response = response.content.decode("utf8")
                else:
                    response = None

        elif method == "POST":
            data = "POST" + url + json.dumps(post_data)
            md5 = self.getmd5(data)
            file_path = self.base_path + md5 + ".html"
            if os.path.exists(file_path):  # 判断文件是否存在,如果存在就从本地读取
                print "POST for local"
                with open(file_path, 'r') as f:
                    res = f.read()
                response = res
            else:
                print "POST for web"
                try:
                    response = requests.post(url=url, data=post_data, verify=False, headers=headers,
                                             cookies=self.cookies, timeout=30,
                                             allow_redirects=allow_redirects)
                except Exception as e:
                    response = requests.post(url=url, data=post_data, verify=False, headers=headers,
                                             cookies=self.cookies, timeout=30,
                                             allow_redirects=allow_redirects)

                # 获取cookies, 并更新self.cookies
                cookies = response.cookies.get_dict()
                for k in cookies:
                    self.cookies[k] = cookies[k]

                # 获取location,
                location = response.headers.get("Location", None)
                if location:
                    print "响应头中存在location"
                    self.location = location
                    # response, file_path = self.parse_url(url=location, method="GET", headers=headers, local_pid=pid)
                    # print "已重新请求location地址"
                    # return response, file_path
                    # return response.content.decode('utf8')

                if response:
                    if tofile:
                        with open(file_path, 'w') as f:  # 保存此次请求的html
                            f.write(response.content.decode('utf8'))
                    response = response.content.decode("utf8")

                else:
                    response = None

        return response, file_path

    def r_reset(self):
        self.r.delete(self.redis_set)
        self.r.execute()

    def get_mme(self):
        '''获取所有车型数据,存入数据库,做为后续查询蓄电池数据的条件,
        mme: make, model, engine'''

        # 获取数据库链接,游标
        conn = self.get_conn(self.db_config)
        curs = self.get_cursor(conn)

        # 从开始页获取所有车型列表
        start_response, start_file_path = self.parse_url(url=self.start_url, method="GET", headers=self.headers)
        if start_response:
            start_html = etree.HTML(start_response)
        else:
            raise Exception("没有获取到首页响应")

        make_list = start_html.xpath('//select[@id="product_finder_make"]/option[position()>1]/@value')
        for make in make_list:
            # print make
            if self.is_sleep:  # 控制速度
                time.sleep(1)
            get_model_url = self.get_model_url.format(make)
            print get_model_url
            get_model_response, get_model_file_path = self.parse_url(url=get_model_url, method="GET", headers=self.headers)
            if get_model_response:
                get_model_html = etree.HTML(get_model_response)
            else:
                print make, get_model_file_path
                raise Exception("没有获取到model")
            model_list = get_model_html.xpath('//option[position()>1]/@value')
            for model in model_list:
                # print model
                get_engine_url = self.get_engine_url.format(make, model)
                print get_engine_url
                get_engine_response, get_engine_file_path = self.parse_url(url=get_engine_url, method="GET", headers=self.headers)
                if get_engine_response:
                    get_engine_html = etree.HTML(get_engine_response)
                else:
                    print make, model, get_engine_file_path
                    raise Exception("没有获取到engine")
                engine_list = get_engine_html.xpath('//option[position()>1]/@value')
                for engine in engine_list:
                    # print engine
                    # 组装最终数据字典
                    last_data = dict(
                        make=make,
                        model=model,
                        engine=engine
                    )
                    table = '{}_cars_info'.format(self.partBrand)
                    sql = self.construct_insert_sql(table=table, data=last_data)
                    curs.execute(sql)
                    self.total_num += 1
                    if self.total_num % 100 == 0:
                        conn.commit()
                        print "提交insert缓存, 当前数量: {}".format(self.total_num)
        conn.commit()
        print "最终提交, 总数据量: {}".format(self.total_num)
        curs.close()
        conn.close()

    def get_data(self, select_sql):

        self.curs1.execute(select_sql)
        for data in self.curs1:
            make = data[0]
            model = data[1]
            engine = data[2]
            # 测试用数据
            # make = '路虎 (进口)'
            # model = 'RANGE ROVER EVOQUE'
            # engine = '2.0'

            ### 此处可添加断点续爬功能, 思路:把make,model,engine生成电子指纹,存入redis中,实现断点续爬
            str = make+model+engine
            md5_str = self.getmd5(str)
            # print md5_str
            self.r.sismember(self.redis_set, md5_str)
            if self.r.execute()[0]:
                self.sub_num += 1
                continue
            self.r.sadd(self.redis_set, md5_str)

            if self.is_sleep:
                time.sleep(1)

            search_url = self.search_url.format(make, model, engine)
            print search_url
            try:
                search_response, search_file_path = self.parse_url(url=search_url, method="GET", headers=self.headers)
            except Exception as e:
                search_response, search_file_path = self.parse_url(url=search_url, method="GET", headers=self.headers)

            if search_response:
                search_html = etree.HTML(search_response)
            else:
                raise Exception('没有获取到搜索结果页响应')

            result_div_list = search_html.xpath('//div[@class="results"]/div[@class="single-product-result"]')
            if len(result_div_list) <= 0:
                print "没有找到对应的蓄电池详情列表, 跳过", search_url
                continue
            for result_div in result_div_list:
                part_num = result_div.xpath(u'.//h3/a/text()')
                part_num = part_num[0].replace('\r', '').replace('\n', '').replace('\t', '').replace(' ', '') if len(
                    part_num) > 0 else ""

                capacity = result_div.xpath(u'.//div[contains(text(), "电量")]/following-sibling::div[1]/text()')
                capacity = capacity[0].replace('\r', '').replace('\n', '').replace('\t', '').replace(' ', '') if len(
                    capacity) > 0 else ""

                cold_start = result_div.xpath(u'.//div[contains(text(), "冷启动电流:")]/following-sibling::div[1]/text()')
                cold_start = cold_start[0].replace('\r', '').replace('\n', '').replace('\t', '').replace(' ',
                                                                                                         '') if len(
                    cold_start) > 0 else ""

                remark = result_div.xpath(u'.//div[@class="copy-small"]/text()')
                remark = remark[0].replace('\r', '').replace('\n', '').replace('\t', '').replace(' ', '') if len(
                    remark) > 0 else ""

                print make, model, engine, part_num, capacity, cold_start, remark
                last_data = dict(
                    make=make,
                    model=model,
                    engine=engine,
                    part_brand=self.partBrand,
                    part_num=part_num,
                    capacity=capacity,
                    cold_start=cold_start,
                    remark=remark,
                    status=''
                )
                sql = self.construct_insert_sql(table=self.insert_table, data=last_data)
                self.curs2.execute(sql)
                self.total_num += 1
                self.r.execute()  # 提交redis缓存
                if self.total_num % 100 == 0:
                    self.conn2.commit()

                    print "提交insert缓存, 当前数量: {}".format(self.total_num)

    def run(self):

        # 获取数据库链接
        # 获取一系列数据库链接,游标
        self.conn1 = self.get_conn(dbconfig_dict=self.db_config)
        self.conn2 = self.get_conn(dbconfig_dict=self.db_config)
        self.curs1 = self.get_cursor(conn=self.conn1)
        self.curs2 = self.get_cursor(conn=self.conn2)

        if self.make_list:  # 如果列表有内容,执行列表内的品牌
            for make in self.make_list:
                select_sql = '''select make,model,engine from {} where make="{}"'''.format(self.select_table, make)

                self.get_data(select_sql)
        else:  # 反之执行所有品牌
            select_sql = '''select make,model,engine from {}'''.format(self.select_table)
            self.get_data(select_sql)
        self.conn2.commit()
        # print self.sub_num

        print "最终提交, 总数据量: {}".format(self.total_num)

        self.curs1.close()
        self.curs2.close()
        self.conn1.close()
        self.conn2.close()


if __name__ == '__main__':
    # MySQL connection settings (NOTE(review): credentials are hard-coded;
    # consider moving them to environment variables or a config file)
    db_config = {
        "host": "127.0.0.1",
        "port": 3306,
        "user": "root",
        "passwd": "123456",
        "db": "my_data",
        "charset": "utf8",
    }
    partBrand = 'varta'               # site brand tag
    is_sleep = False                  # request throttling switch
    select_table = 'varta_cars_info'  # table holding make/model/engine combos
    insert_table = 'varta_all'        # table receiving battery rows
    make_list = []                    # makes to crawl; empty means all of them
    tools = VartaForCars(
        db_config=db_config,
        partBrand=partBrand,
        is_sleep=is_sleep,
        select_table=select_table,
        insert_table=insert_table,
        make_list=make_list,
    )
    # tools.get_mme()   # one-off: harvest every vehicle combo into MySQL first
    # tools.r_reset()   # clears the resume set -- enable only deliberately
    tools.run()  # start crawling












