# -*- coding: UTF-8 -*-
# Author: Damon(wuud1)
# CreateDate:
# Message:

import requests
import json
from lxml import etree
import time
from collections import OrderedDict
import pymysql
import hashlib
import os
import redis
import sys
# Python 2 only: force the process-wide default string encoding to UTF-8 so the
# implicit str<->unicode conversions of the Chinese text below don't raise
# UnicodeDecodeError.  NOTE(review): sys.setdefaultencoding is a well-known hack
# (site.py deletes the attribute at startup, hence the reload) and this makes
# the file Python-2 only; confirm before porting to Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')

class TRW_For_Pid(object):
    """Scrape OE (original-equipment) part-number cross references from
    trwaftermarket.com.

    Reads (pid, brand) rows from MySQL table ``select_table``, searches the
    TRW catalogue API for each pid, parses the OE-number table on each product
    detail page and inserts the resulting rows into MySQL table
    ``insert_table``.  Every HTTP response is cached on disk (keyed by an MD5
    fingerprint of the request), and processed pids are recorded in a Redis
    set so an interrupted run can resume where it left off.

    NOTE(review): Python 2 only (print statements, relies on the
    sys.setdefaultencoding hack at module top).
    """

    def __init__(self, **kwargs):
        """Store configuration.  Expected keyword arguments:

        db_config     -- dict of pymysql.connect() parameters
        select_table  -- name of the table to read (pid, brand) rows from
        insert_table  -- name of the table to insert result rows into
        """
        self.db_config = kwargs.get("db_config")  # database configuration
        self.select_table = kwargs.get("select_table")  # table queried for pids
        self.insert_table = kwargs.get("insert_table")  # table receiving results
        self.conn1 = None  # DB connections and cursors, created in run()
        self.conn2 = None
        self.curs1 = None
        self.curs2 = None
        self.base_path = '/Users/duanmengdi/Desktop/workspace/trw_html/'  # directory holding cached html files
        self.location = ""  # last redirect Location header seen
        self.total_num = 0  # global counter of rows inserted so far
        self.r = redis.Redis(host='localhost', port=6379, db=0)  # redis connection, used for resumable crawling

        # bookkeeping for resume-after-interruption
        self.continue_num = 0
        self.is_print = False

        # parameters used by every request
        self.search_url = 'https://www.trwaftermarket.com/api/CatalogueData?market=cn&vehicleType=P&partNumber={}'
        self.detail_url = 'https://www.trwaftermarket.com/cn/catalogue/product/{}/'

        self.headers = {
            "Host": "www.trwaftermarket.com",
            "X-Current-Language-Code": "zh-CN",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
            "Referer": "https://www.trwaftermarket.com/cn/catalogue/",
        }

        self.cookies = {
            "epslanguage": "zh-CN",
            "_ga": "GA1.2.1201171004.1531384240",
            "cb-enabled": "enabled",
            "_pk_id.41940141-bc1f-431b-948a-e5c1fe89ed2e.e779": "c511fbb9a8d57c97.1531384242.1.1531384277.1531384242.",
            "stg_last_interaction": "Thu, 12 Jul 2018 08:32:18 GMT",
            "stg_returning_visitor": "Thu, 12 Jul 2018 08:32:18 GMT",
            "ASP.NET_SessionId": "d4inxzfeuhcqxjasbump00o2",
            "EPi:NumberOfVisits": "5,2018-07-12T08:30:36,2018-07-23T08:25:35,2018-07-25T01:55:21,2018-07-25T08:33:44,2018-07-30T02:26:28",
            "cookiesession1": "1CDCB5A5DEVYMVJ0DYQMBDMNUGNDC2C1",
            "_gid": "GA1.2.1390001797.1532917590",
            "_gat": "1",
            "__RequestVerificationToken": "OLkE9JRXKQRQmXsXcLK4i6aaYTpdW3FrTaZ50G6RXyceXifIUUbUaLaLmFI9-m046t4TI1h5JWtRn1NQANJsepS4m1I1"
        }


    def getmd5(self, str):
        """Return the hex MD5 digest of *str* (used as the cache-file key).

        NOTE(review): the parameter name shadows the builtin ``str``.
        """
        md5 = hashlib.md5()
        md5.update(str)
        return md5.hexdigest()

    def get_conn(self, dbconfig_dict):
        """Open and return a new pymysql connection from a config dict."""
        conn = pymysql.connect(**dbconfig_dict)
        return conn

    def get_cursor(self, conn, type='stream'):
        """Return a cursor of the requested kind ('stream' or 'dict').

        NOTE(review): the *type* parameter shadows the builtin ``type``.
        """
        if type == 'stream':
            return conn.cursor(pymysql.cursors.SSCursor)  # streaming cursor: large result sets without loading into memory (rows come back as tuples)
        elif type == 'dict':
            return conn.cursor(pymysql.cursors.DictCursor)  # dict cursor: rows come back as dicts
        else:
            raise Exception("cursor type error")

    def parse_url(self, url, method, post_data=None, headers=None, tofile=True, **kwargs):
        """Fetch *url* via GET or POST with a transparent on-disk cache.

        The cache key is the MD5 of method+url+pid (GET) or method+url+json(body)
        (POST).  On a cache miss the page is fetched once with a single blind
        retry, ``self.cookies`` is merged with the response cookies and any
        redirect ``Location`` header is stashed in ``self.location``
        (redirects are NOT followed).

        Returns (response_text_or_None, cache_file_path).
        NOTE(review): if *method* is neither 'GET' nor 'POST' the return
        statement raises NameError because neither name was bound.
        """
        requests.packages.urllib3.disable_warnings()  # verify=False is used below, so silence the insecure-https warning
        pid = kwargs.get("local_pid", "")
        charset = kwargs.get("charset", "utf8")  # currently unused
        if method == 'GET':
            data = "GET" + url + pid  # build the fingerprint source string
            md5 = self.getmd5(data)  # fingerprint of this request
            file_path = self.base_path + md5 + ".html"
            if os.path.exists(file_path):  # serve from the local cache when present
                print "GET for local"
                with open(file_path, 'r') as f:
                    res = f.read()
                response = res
            else:  # cache miss: fetch from the web
                print "GET for web"
                try:
                    response = requests.get(url=url, verify=False, headers=headers, cookies=self.cookies, timeout=30,
                                            allow_redirects=False)
                except Exception as e:
                    # single blind retry; a second failure propagates to the caller
                    response = requests.get(url=url, verify=False, headers=headers, cookies=self.cookies, timeout=30,
                                            allow_redirects=False)

                # merge the response cookies into self.cookies
                cookies = response.cookies.get_dict()
                for k in cookies:
                    self.cookies[k] = cookies[k]

                # remember any redirect target (redirects are not followed)
                location = response.headers.get("Location", None)
                if location:
                    print "响应头中存在location"
                    self.location = location
                    # return response.content.decode('utf8')

                if response:
                    if tofile:
                        with open(file_path, 'w') as f:  # cache this response's html on disk
                            f.write(response.content.decode('utf8'))
                    response = response.content.decode("utf8")
                else:
                    response = None

        elif method == "POST":
            data = "POST" + url + json.dumps(post_data)
            md5 = self.getmd5(data)
            file_path = self.base_path + md5 + ".html"
            if os.path.exists(file_path):  # serve from the local cache when present
                print "POST for local"
                with open(file_path, 'r') as f:
                    res = f.read()
                response = res
            else:
                print "POST for web"
                try:
                    response = requests.post(url=url, data=post_data, verify=False, headers=headers,
                                             cookies=self.cookies, timeout=30,
                                             allow_redirects=False)
                except Exception as e:
                    # single blind retry; a second failure propagates to the caller
                    response = requests.post(url=url, data=post_data, verify=False, headers=headers,
                                             cookies=self.cookies, timeout=30,
                                             allow_redirects=False)

                # merge the response cookies into self.cookies
                cookies = response.cookies.get_dict()
                for k in cookies:
                    self.cookies[k] = cookies[k]

                # remember any redirect target (redirects are not followed)
                location = response.headers.get("Location", None)
                if location:
                    print "响应头中存在location"
                    self.location = location
                    # return response.content.decode('utf8')

                if response:
                    if tofile:
                        with open(file_path, 'w') as f:  # cache this response's html on disk
                            f.write(response.content.decode('utf8'))
                    response = response.content.decode("utf8")

                else:
                    response = None

        return response, file_path

    def save2db(self, data_dict, curs):
        """Insert one cross-reference row into ``self.insert_table``.

        Exits the whole process on any insert error after printing the
        exception and the offending SQL.
        NOTE(review): the SQL is built by raw string interpolation — a pid1 or
        part_name containing a quote breaks the statement (SQL injection
        hazard); consider curs.execute(sql, params) instead.
        """
        # pull the fields out of the dict (decode to unicode for formatting)
        pid = data_dict['pid'].decode('utf8')
        brand_name = data_dict['brand_name'].decode('utf8')
        part_brand = data_dict['part_brand'].decode('utf8')
        part_num = data_dict['part_num'].decode('utf8')
        part_name = data_dict['part_name'].decode('utf8')
        brand_name1 = data_dict['brand_name1'].decode('utf8')
        pid1 = data_dict['pid1'].decode('utf8')
        status = data_dict['status'].decode('utf8')

        # assemble the INSERT statement
        sql = '''INSERT INTO `{}` (`pid`, `brand_name`, `part_brand`, `part_num`, `part_name`, `brand_name1`, `pid1`, `status`) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')'''.format(self.insert_table) % (
        pid, brand_name, part_brand, part_num, part_name, brand_name1, pid1, status)
        # print sql
        try:
            curs.execute(sql)
        except Exception as e:
            print e
            print sql
            exit()
        self.total_num += 1  # count total rows written

    def r_flushall(self):
        """Wipe the Redis resume-state db and force a save to disk."""
        self.r.flushdb()
        self.r.save()

    def run(self):
        """Main loop: read pids, search TRW, parse detail pages, insert rows.

        Commits the write connection every 100 inserted rows, on any error
        (before re-raising), and once at the end.
        """
        # open DB connections and cursors: conn1/curs1 stream the pid list,
        # conn2/curs2 receive the inserts
        self.conn1 = self.get_conn(dbconfig_dict=self.db_config)
        self.conn2 = self.get_conn(dbconfig_dict=self.db_config)
        self.curs1 = self.get_cursor(conn=self.conn1)
        self.curs2 = self.get_cursor(conn=self.conn2)
        select_sql = '''select pid,brand from {}'''.format(self.select_table)

        self.curs1.execute(select_sql)

        for data in self.curs1:

            local_pid = data[0].replace('.0', '')
            # local_pid = 'SDB000624'  # test pid used during development (has results)
            # local_pid = 'LR090684'  # test pid used during development (no results)

            # resume support: skip pids already processed in a previous run
            if self.r.sismember('pid_set', local_pid):
                self.continue_num += 1  # count skipped pids
                self.is_print = True  # arm the skip-count printout below
                continue

            if self.is_print:
                print "启动断点续爬, 跳过数量{}".format(self.continue_num)
                self.is_print = False  # disarm the printout

            # time.sleep(1)

            self.r.sadd('pid_set', local_pid)  # record pid in redis so a restart skips it

            brand_name = data[1]
            search_url = self.search_url.format(local_pid)
            try:
                search_result_response, search_result_file_path = self.parse_url(search_url, method="GET", headers=self.headers, local_pid=local_pid)
                search_result_json = json.loads(search_result_response)
                productResults = search_result_json.get('productResults', None)
                if productResults == None:
                    print "没有搜索到结果, 跳过", local_pid, search_result_file_path
                    continue

                for productResult in productResults:
                    part_num = productResult['productCode']
                    part_name = productResult['productGroupName']
                    # print part_num, part_name

                    detail_url = self.detail_url.format(part_num)
                    print detail_url

                    detail_response, detail_file_path = self.parse_url(url=detail_url, method="GET", headers=self.headers, local_pid=local_pid)
                    if detail_response:
                        detail_html = etree.HTML(detail_response)
                    else:
                        print "没有获取到详情页响应", local_pid, detail_file_path
                        exit()
                    oe_tr_list = detail_html.xpath('//div[@id="oe-numbers-accordion"]/div/table/tr')
                    if len(oe_tr_list) <= 0:
                        print "没有获取到原厂件号对照表, 跳过", local_pid, detail_file_path
                    else:
                        oe_tr_list.pop(0)  # drop the header row

                    for oe_tr in oe_tr_list:
                        brand_name1 = oe_tr.xpath('./td[1]/text()')
                        brand_name1 = brand_name1[0] if len(brand_name1)>0 else ""
                        pid1 = oe_tr.xpath('./td[2]/text()')
                        pid1 = pid1[0] if len(pid1)>0 else ""
                        # print brand_name1, pid1
                        # filtering conditions could go here
                        # pass
                        # assemble the final row dict
                        last_data = dict(
                            pid=local_pid,
                            brand_name=brand_name,
                            part_brand='trw',
                            part_num=part_num,
                            part_name=part_name,
                            brand_name1=brand_name1,
                            pid1=pid1,
                            status=''
                        )

                        self.save2db(data_dict=last_data,curs=self.curs2)
                        print last_data

                        # persist the redis resume state
                        self.r.save()

                        if self.total_num % 100 == 0:
                            self.conn2.commit()
                            print "提交insert缓存, 当前数量: {}".format(self.total_num)
            except Exception as e:
                self.conn2.commit()
                print "爬虫遇到故障,已提交insert缓存"
                raise e
        self.conn2.commit()
        print "最终提交, 数量: {}".format(self.total_num)

        self.curs1.close()
        self.curs2.close()
        self.conn1.close()
        self.conn2.close()


if __name__ == '__main__':

    # MySQL connection settings shared by the read and write connections.
    db_config = {
        "host": "127.0.0.1",
        "port": 3306,
        "user": "root",
        "passwd": "123456",
        "db": "my_data",
        "charset": "utf8",
    }

    spider = TRW_For_Pid(
        db_config=db_config,
        select_table='0731_pid_2',
        insert_table='trw_all',
    )
    spider.r_flushall()  # start from a clean redis resume state
    spider.run()
